From 7afa61e71877a5b7892ffe4992f804cd84807240 Mon Sep 17 00:00:00 2001
From: Hsiangkai Wang
Date: Mon, 6 Sep 2021 21:27:15 +0800
Subject: [PATCH] [RISCV] (2/2) Add the tail policy argument to builtins/intrinsics.

Add the tail policy argument to Clang builtins. There are two policies
for tail elements: tail agnostic means users do not care about the
values in the tail elements, and tail undisturbed means the values in
the tail elements must be kept after the operation. To let users
control the tail policy, we add an additional argument at the end of
the argument list.

For unmasked operations, there is no maskedoff operand and the tail
policy is always tail agnostic. Users who want to keep the tail
elements under an unmasked operation can call the masked operation
with an all-ones mask instead. So, in most cases, we add the
additional argument only for the masked operations. The exceptions
are listed below.

In this patch, we do not handle the following cases, to reduce the
complexity of the patch. They could be done in two separate follow-up
patches.

Use the dest argument to control the tail policy:
  vmerge.vvm/vmerge.vxm/vmerge.vim (add _t builtins with an additional dest argument)
  vfmerge.vfm (add _t builtins with an additional dest argument)
  vmv.v.v (add _t builtins with an additional dest argument)
  vmv.v.x (add _t builtins with an additional dest argument)
  vmv.v.i (add _t builtins with an additional dest argument)
  vfmv.v.f (add _t builtins with an additional dest argument)
  vadc.vvm/vadc.vxm/vadc.vim (add _t builtins with an additional dest argument)
  vsbc.vvm/vsbc.vxm (add _t builtins with an additional dest argument)

Always have the tail policy argument for both masked and unmasked
intrinsics:
  Vector Single-Width Integer Multiply-Add Instructions (add _t and _mt builtins)
  Vector Widening Integer Multiply-Add Instructions (add _t and _mt builtins)
  Vector Single-Width Floating-Point Fused Multiply-Add Instructions (add _t and _mt builtins)
  Vector Widening Floating-Point Fused Multiply-Add Instructions (add _t and _mt builtins)
  Vector Reduction Operations (add _t and _mt builtins)
  Vector Slideup Instructions (add _t and _mt builtins)
  Vector Slidedown Instructions (add _t and _mt builtins)

Discussion: https://github.com/riscv/rvv-intrinsic-doc/pull/101

Differential Revision: https://reviews.llvm.org/D109322
---
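A minimal usage sketch, not taken from the patch itself: it assumes the
overloaded vaadd spelling exercised by the tests in this patch, the
VE_TAIL_* macros from the new policy header, and the existing
vmset_m_b8 builtin for building an all-ones mask. At the IR level the
masked intrinsics gain a trailing i64 policy operand (1 = tail
agnostic), as the updated CHECK lines below show.

    #include <riscv_vector.h>

    // Masked operation: the new last argument selects the tail policy.
    // With VE_TAIL_UNDISTURBED, tail elements keep the values from
    // maskedoff instead of becoming unspecified.
    vint8m1_t add_keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                            vint8m1_t op1, vint8m1_t op2, size_t vl) {
      return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
    }

    // Unmasked operations stay tail agnostic. To preserve the tail
    // without a real mask, use the masked form with an all-ones mask
    // (vmset_m_b8 is assumed to be the mask-set builtin here).
    vint8m1_t add_unmasked_keep_tail(vint8m1_t dest, vint8m1_t op1,
                                     vint8m1_t op2, size_t vl) {
      vbool8_t all_ones = vmset_m_b8(vl);
      return vaadd(all_ones, dest, op1, op2, vl, VE_TAIL_UNDISTURBED);
    }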
 clang/include/clang/Basic/riscv_vector.td          |   137 +-
 .../RISCV/rvv-intrinsics-overloaded/vaadd.c        |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c |   704 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vasub.c        |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vfabs.c        |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfadd.c        |   272 +-
 .../RISCV/rvv-intrinsics-overloaded/vfcvt.c        |   270 +-
 .../RISCV/rvv-intrinsics-overloaded/vfdiv.c        |    90 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmax.c        |    90 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmin.c        |    90 +-
 .../RISCV/rvv-intrinsics-overloaded/vfmul.c        |    90 +-
 .../RISCV/rvv-intrinsics-overloaded/vfncvt.c       |   260 +-
 .../RISCV/rvv-intrinsics-overloaded/vfneg.c        |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrdiv.c       |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrec7.c       |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c     |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfrsub.c       |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsgnj.c       |   270 +-
 .../RISCV/rvv-intrinsics-overloaded/vfslide1down.c |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfslide1up.c   |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsqrt.c       |    45 +-
 .../RISCV/rvv-intrinsics-overloaded/vfsub.c        |    90 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwadd.c       |    80 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwcvt.c       |   190 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwmul.c       |    40 +-
 .../RISCV/rvv-intrinsics-overloaded/vfwsub.c       |    80 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c  |   265 +-
 .../RISCV/rvv-intrinsics-overloaded/vloxei.c       |   955 +-
 .../RISCV/rvv-intrinsics-overloaded/vloxseg.c      | 21697 +++++----
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c |   265 +-
 .../RISCV/rvv-intrinsics-overloaded/vlseg.c        |  5186 +--
 .../RISCV/rvv-intrinsics-overloaded/vlsegff.c      |  5776 +--
 .../RISCV/rvv-intrinsics-overloaded/vlsseg.c       |  2072 +-
 .../RISCV/rvv-intrinsics-overloaded/vluxei.c       |   955 +-
 .../RISCV/rvv-intrinsics-overloaded/vluxseg.c      | 21697 +++++----
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c |  1100 +-
 .../RISCV/rvv-intrinsics-overloaded/vnclip.c       |   300 +-
 .../RISCV/rvv-intrinsics-overloaded/vncvt.c        |   150 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c |   110 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vnsra.c        |   150 +-
 .../RISCV/rvv-intrinsics-overloaded/vnsrl.c        |   150 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c  |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vrgather.c     |   785 +-
 .../RISCV/rvv-intrinsics-overloaded/vrsub.c        |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vsadd.c        |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vse.c  |   447 +-
 .../RISCV/rvv-intrinsics-overloaded/vsext.c        |   140 +-
 .../RISCV/rvv-intrinsics-overloaded/vslide1down.c  |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vslide1up.c    |   220 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vsmul.c        |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vsoxei.c       |  2095 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c |   220 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c |   220 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c |   547 +-
 .../RISCV/rvv-intrinsics-overloaded/vssra.c        |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vssrl.c        |   220 +-
 .../RISCV/rvv-intrinsics-overloaded/vssub.c        |   440 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vsuxei.c       |  2095 +-
 .../RISCV/rvv-intrinsics-overloaded/vwadd.c        |   600 +-
 .../RISCV/rvv-intrinsics-overloaded/vwcvt.c        |   150 +-
 .../RISCV/rvv-intrinsics-overloaded/vwmul.c        |   450 +-
 .../RISCV/rvv-intrinsics-overloaded/vwsub.c        |   600 +-
 .../CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c |   440 +-
 .../RISCV/rvv-intrinsics-overloaded/vzext.c        |   140 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c    |  1259 +-
 .../CodeGen/RISCV/rvv-intrinsics/vadd-policy.c     |    44 +
 clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c     |   973 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c    |  1259 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vcompress.c  |   108 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c    |   153 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c    |   331 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c    |  2293 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c    |   588 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c    |   588 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c    |   523 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c    |   523 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c     |   346 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c   |  2067 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c    |   262 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c   |   291 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c   |   262 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c |   289 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c   |   291 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c   |  1881 +-
 .../CodeGen/RISCV/rvv-intrinsics/vfslide1down.c    |   262 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c |   305 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c   |   291 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c    |   588 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c   |   883 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c   |  1703 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c   |   373 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c   |   883 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c      |   833 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c    |  1219 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c    |   561 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c   |  3439 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c  | 43636 +++++++++++++------
 clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c     |  1032 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c    | 31211 ++++++-----
 clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c  |   804 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c   | 10987 +++--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c   |  3440 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c  | 43636 +++++++++++++------
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c     |  1146 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c    |   486 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c    |   431 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c    |   434 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c    |   491 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c    |   486 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c    |   486 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c     |  2863 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c      |   163 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c   |   873 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c    |   272 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c     |   200 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c     |   398 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c    |   393 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c    |   393 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c      |  1147 +-
 .../CodeGen/RISCV/rvv-intrinsics/vreinterpret.c    |   433 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c |  2794 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c    |   575 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c    |  1259 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c      |   243 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c   |   251 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c  |    86 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c    |   395 +-
 .../CodeGen/RISCV/rvv-intrinsics/vslide1down.c     |   663 +-
 .../test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c  |   641 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c    |   622 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c   |   958 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c  |  8849 ++--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c     |   575 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c     |   574 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c     |   242 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c    |   933 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c    |   622 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c    |   637 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c   |   933 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c    |  1259 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c   |   958 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c  |  8849 ++--
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c    |  1711 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c    |   330 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c    |  1173 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c    |  1711 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c     |  1147 +-
 clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c    |   395 +-
 clang/utils/TableGen/RISCVVEmitter.cpp             |    68 +-
 163 files changed, 188956 insertions(+), 108626 deletions(-)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index fc8e56f..25e583b 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -170,9 +170,23 @@ class RVVBuiltin {
 }

 let HasNoMaskedOverloaded = false,
+    HasPolicy = false,
     ManualCodegen = [{
       IntrinsicTypes = {ResultType, Ops[1]->getType()};
       Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
@@ -572,6 +592,19 @@ let HasNoMaskedOverloaded = false,
     let IRName = "vle1";
     let HasMask = false;
   }
+}
+
+let HasNoMaskedOverloaded = false,
+    ManualCodegen = [{
+      IntrinsicTypes = {ResultType, Ops[1]->getType()};
+      Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+    }],
+    ManualCodegenMask= [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
+      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+      Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+    }] in {
 multiclass RVVVLEBuiltin types> {
   let Name = NAME # "_v",
       IRName = "vle",
@@ -611,7 +644,7 @@ multiclass RVVVLEFFBuiltin types> {
     ManualCodegenMask = [{
       {
         // Move mask to right before vl.
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         IntrinsicTypes = {ResultType, Ops[4]->getType()};
         Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
         Value *NewVL = Ops[2];
@@ -648,7 +681,7 @@ multiclass RVVVLSEBuiltin types> {
     }],
     ManualCodegenMask= [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -668,7 +701,7 @@ multiclass RVVIndexedLoad {
     }],
     ManualCodegenMask = [{
       // Move mask to right before vl.
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
       Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
     }] in {
@@ -688,6 +721,7 @@ multiclass RVVIndexedLoad {
 }

 let HasMaskedOffOperand = false,
+    HasPolicy = false,
     ManualCodegen = [{
       // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
       std::swap(Ops[0], Ops[1]);
@@ -705,6 +739,22 @@ let HasMaskedOffOperand = false,
     let IRName = "vse1";
     let HasMask = false;
   }
+}
+
+let HasMaskedOffOperand = false,
+    HasPolicy = false,
+    ManualCodegen = [{
+      // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
+      std::swap(Ops[0], Ops[1]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
+    }],
+    ManualCodegenMask= [{
+      // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
+      std::swap(Ops[0], Ops[2]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+    }] in {
 multiclass RVVVSEBuiltin types> {
   let Name = NAME # "_v",
       IRName = "vse",
@@ -724,6 +774,7 @@ multiclass RVVVSSEBuiltin types> {
       IRName = "vsse",
       IRNameMask = "vsse_mask",
       HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
         // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
         std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -747,6 +798,7 @@ multiclass RVVVSSEBuiltin types> {
 multiclass RVVIndexedStore {
   let HasMaskedOffOperand = false,
+      HasPolicy = false,
       ManualCodegen = [{
         // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
         std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
@@ -833,7 +885,8 @@ multiclass RVVUnitStridedSegLoad {
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 2]);
-        assert(Operands.size() == NF + 3);
+        Operands.push_back(Ops[2 * NF + 3]);
+        assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -905,8 +958,9 @@ multiclass RVVUnitStridedSegLoadFF {
         Operands.push_back(Ops[2 * NF + 1]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
+        Operands.push_back(Ops[2 * NF + 4]);
         Value *NewVL = Ops[2 * NF + 2];
-        assert(Operands.size() == NF + 3);
+        assert(Operands.size() == NF + 4);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -978,7 +1032,8 @@ multiclass RVVStridedSegLoad {
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        assert(Operands.size() == NF + 4);
+        Operands.push_back(Ops[2 * NF + 4]);
+        assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -1044,7 +1099,8 @@ multiclass RVVIndexedSegLoad {
         Operands.push_back(Ops[2 * NF + 2]);
         Operands.push_back(Ops[NF]);
         Operands.push_back(Ops[2 * NF + 3]);
-        assert(Operands.size() == NF + 4);
+        Operands.push_back(Ops[2 * NF + 4]);
+        assert(Operands.size() == NF + 5);
         llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
         llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
         clang::CharUnits Align =
@@ -1094,6 +1150,7 @@ multiclass RVVUnitStridedSegStore {
         IRNameMask = op # nf # "_mask",
         NF = nf,
         HasMaskedOffOperand = false,
+        HasPolicy = false,
         ManualCodegen = [{
           {
             // Builtin: (ptr, val0, val1, ..., vl)
@@ -1139,6 +1196,7 @@ multiclass RVVStridedSegStore {
         IRNameMask = op # nf # "_mask",
         NF = nf,
         HasMaskedOffOperand = false,
+        HasPolicy = false,
         ManualCodegen = [{
           {
             // Builtin: (ptr, stride, val0, val1, ..., vl).
@@ -1180,6 +1238,7 @@ multiclass RVVIndexedSegStore {
         IRNameMask = op # nf # "_mask",
         NF = nf,
         HasMaskedOffOperand = false,
+        HasPolicy = false,
         ManualCodegen = [{
           {
             // Builtin: (ptr, index, val0, val1, ..., vl)
@@ -1224,6 +1283,7 @@ multiclass RVVAMOBuiltinSetgetType(), Ops[3]->getType()};
@@ -1258,7 +1318,7 @@ multiclass RVVPseudoUnaryBuiltin {
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast(ResultType)->getElementType(),
@@ -1288,7 +1348,7 @@ multiclass RVVPseudoVNotBuiltin {
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           cast(ResultType)->getElementType(),
@@ -1335,7 +1395,7 @@ multiclass RVVPseudoVFUnaryBuiltin {
     }],
     ManualCodegenMask = [{
       {
-        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);
         // maskedoff, op1, mask, vl
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
@@ -1367,7 +1427,7 @@ multiclass RVVPseudoVWCVTBuiltingetType(),
@@ -1402,7 +1462,7 @@ multiclass RVVPseudoVNCVTBuiltingetType(),
@@ -1423,6 +1483,7 @@ multiclass RVVPseudoVNCVTBuiltin;
 defm vmadc : RVVIntMaskOutBuiltinSet;
@@ -1624,6 +1685,7 @@ defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
                                          ["Uv", "UvUw"]]>;

 // 12.8. Vector Integer Comparison Instructions
+let HasPolicy = false in {
 defm vmseq : RVVIntMaskOutBuiltinSet;
 defm vmsne : RVVIntMaskOutBuiltinSet;
 defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1634,6 +1696,7 @@ defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
 defm vmsgt : RVVSignedMaskOutBuiltinSet;
 defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
 defm vmsge : RVVSignedMaskOutBuiltinSet;
+}

 // 12.9. Vector Integer Min/Max Instructions
 defm vminu : RVVUnsignedBinBuiltinSet;
@@ -1669,6 +1732,7 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
 }

 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
+let HasPolicy = false in {
 defm vmacc : RVVIntTerBuiltinSet;
 defm vnmsac : RVVIntTerBuiltinSet;
 defm vmadd : RVVIntTerBuiltinSet;
@@ -1689,10 +1753,11 @@ defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
 defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                        [["vx", "w", "wwUev"]]>;
 }
+}

 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1705,7 +1770,7 @@ let HasMask = false,
 }

 // 12.16. Vector Integer Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let MangledName = "vmv_v" in {
     defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                                    [["v", "Uv", "UvUv"]]>;
@@ -1769,6 +1834,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
 }

 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+let HasPolicy = false in {
 defm vfmacc : RVVFloatingTerBuiltinSet;
 defm vfnmacc : RVVFloatingTerBuiltinSet;
 defm vfmsac : RVVFloatingTerBuiltinSet;
@@ -1783,6 +1849,7 @@ defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+}

 // 14.8. Vector Floating-Point Square-Root Instruction
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
@@ -1805,20 +1872,22 @@ defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
 defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;

 // 14.13. Vector Floating-Point Compare Instructions
+let HasPolicy = false in {
 defm vmfeq : RVVFloatingMaskOutBuiltinSet;
 defm vmfne : RVVFloatingMaskOutBuiltinSet;
 defm vmflt : RVVFloatingMaskOutBuiltinSet;
 defm vmfle : RVVFloatingMaskOutBuiltinSet;
 defm vmfgt : RVVFloatingMaskOutBuiltinSet;
 defm vmfge : RVVFloatingMaskOutBuiltinSet;
+}

 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v" in
+let Name = "vfclass_v", HasPolicy = false in
 def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;

 // 14.15. Vector Floating-Point Merge Instruction
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
@@ -1830,7 +1899,7 @@ let HasMask = false,
 }

 // 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskedOverloaded = false in
+let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
 defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
                                 [["f", "v", "ve"]]>;
@@ -1867,6 +1936,7 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {

 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
+let HasPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
 defm vredmaxu : RVVUnsignedReductionBuiltin;
 defm vredmax : RVVSignedReductionBuiltin;
@@ -1894,6 +1964,7 @@ defm vfredosum : RVVFloatingReductionBuiltin;
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 defm vfwredsum : RVVFloatingWidenReductionBuiltin;
 defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+}

 // 16. Vector Mask Instructions
 // 16.1. Vector Mask-Register Logical Instructions
@@ -1911,6 +1982,7 @@ def vmset : RVVMaskNullaryBuiltin;
 defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
 defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;

+let HasPolicy = false in {
 // 16.2. Vector mask population count vpopc
 def vpopc : RVVMaskOp0Builtin<"um">;
@@ -1934,10 +2006,11 @@ let HasNoMaskedOverloaded = false in {
   defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
                                               ["v", "Uv", "Uv"]]>;
 }
+}

 // 17. Vector Permutation Instructions
 // 17.1. Integer Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vmv_x" in
     defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                    [["s", "ve", "ev"],
@@ -1949,7 +2022,7 @@ let HasMask = false in {
 }

 // 17.2. Floating-Point Scalar Move Instructions
-let HasMask = false in {
+let HasMask = false, HasPolicy = false in {
   let HasVL = false, MangledName = "vfmv_f" in
     defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                     [["s", "ve", "ev"]]>;
@@ -1960,10 +2033,12 @@ let HasMask = false in {
 }

 // 17.3. Vector Slide Instructions
+let HasPolicy = false in {
 // 17.3.1. Vector Slideup Instructions
 defm vslideup : RVVSlideBuiltinSet;
 // 17.3.2. Vector Slidedown Instructions
 defm vslidedown : RVVSlideBuiltinSet;
+}

 // 17.3.3. Vector Slide1up Instructions
 defm vslide1up : RVVSlideOneBuiltinSet;
@@ -1990,7 +2065,7 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
                                      [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
 // 17.5. Vector Compress Instruction
-let HasMask = false,
+let HasMask = false, HasPolicy = false,
     ManualCodegen = [{
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
       IntrinsicTypes = {ResultType, Ops[3]->getType()};
@@ -2005,7 +2080,7 @@ let HasMask = false,

 // Miscellaneous
 let HasMask = false, HasVL = false, IRName = "" in {
-  let Name = "vreinterpret_v",
+  let Name = "vreinterpret_v", HasPolicy = false,
       ManualCodegen = [{
         return Builder.CreateBitCast(Ops[0], ResultType);
       }] in {
@@ -2027,7 +2102,7 @@ let HasMask = false, HasVL = false, IRName = "" in {
     }
   }

-  let Name = "vundefined", HasNoMaskedOverloaded = false,
+  let Name = "vundefined", HasNoMaskedOverloaded = false, HasPolicy = false,
       ManualCodegen = [{
         return llvm::UndefValue::get(ResultType);
       }] in {
@@ -2037,7 +2112,7 @@ let HasMask = false, HasVL = false, IRName = "" in {

   // LMUL truncation
   // C/C++ Operand: VecTy, IR Operand: VecTy, Index
-  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
+  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc", HasPolicy = false,
      ManualCodegen = [{ {
       ID = Intrinsic::experimental_vector_extract;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2055,7 +2130,7 @@ let HasMask = false, HasVL = false, IRName = "" in {

   // LMUL extension
   // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
-  let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
+  let Name = "vlmul_ext_v", MangledName = "vlmul_ext", HasPolicy = false,
     ManualCodegen = [{
       ID = Intrinsic::experimental_vector_insert;
       IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -2073,7 +2148,7 @@ let HasMask = false, HasVL = false, IRName = "" in {
     }
   }

-  let Name = "vget_v",
+  let Name = "vget_v", HasPolicy = false,
       ManualCodegen = [{
       {
         ID = Intrinsic::experimental_vector_extract;
@@ -2091,7 +2166,7 @@ let HasMask = false, HasVL = false, IRName = "" in {
     }
   }

-  let Name = "vset_v", Log2LMUL = [0, 1, 2],
+  let Name = "vset_v", Log2LMUL = [0, 1, 2], HasPolicy = false,
       ManualCodegen = [{
       {
         ID = Intrinsic::experimental_vector_insert;
@@ -2110,3 +2185,9 @@ let HasMask = false, HasVL = false, IRName = "" in {
     }
   }
 }
+
+let HeaderCode = [{
+#define VE_TAIL_UNDISTURBED 0
+#define VE_TAIL_AGNOSTIC 1
+}] in
+def policy : RVVHeader;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
index 1f8361a..b6cee3d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vaadd.c
@@ -887,994 +887,906 @@ vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vaaddu(op1, op2, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, 
vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, 
vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vaadd(mask, maskedoff, op1, op2, vl); + return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
 vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vaadd(mask, maskedoff, op1, op2, vl);
+  return vaadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vaaddu(mask, maskedoff, op1, op2, vl);
+  return vaaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
index ae3a230..e04f0e2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vadd.c
@@ -5,1763 +5,1411 @@
 #include <riscv_vector.h>
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf8_t test_vadd_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf8_t test_vadd_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf4_t test_vadd_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf4_t test_vadd_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf2_t test_vadd_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf2_t test_vadd_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m1_t test_vadd_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m2_t test_vadd_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m2_t test_vadd_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m4_t test_vadd_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m4_t test_vadd_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m8_t test_vadd_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m8_t test_vadd_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf4_t test_vadd_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf4_t test_vadd_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf2_t test_vadd_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf2_t test_vadd_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m1_t test_vadd_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m1_t test_vadd_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m2_t test_vadd_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m2_t test_vadd_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m4_t test_vadd_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m4_t test_vadd_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m8_t test_vadd_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16m8_t test_vadd_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32mf2_t test_vadd_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32mf2_t test_vadd_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m1_t test_vadd_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m1_t test_vadd_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m2_t test_vadd_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m2_t test_vadd_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m4_t test_vadd_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m4_t test_vadd_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m8_t test_vadd_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint32m8_t test_vadd_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m1_t test_vadd_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m1_t test_vadd_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m2_t test_vadd_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m2_t test_vadd_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m4_t test_vadd_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m4_t test_vadd_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m8_t test_vadd_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint64m8_t test_vadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.nxv1i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf8_t test_vadd_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf8_t test_vadd_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.nxv2i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf4_t test_vadd_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf4_t test_vadd_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.nxv4i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf2_t test_vadd_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8mf2_t test_vadd_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.nxv8i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m1_t test_vadd_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m1_t test_vadd_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.nxv16i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m2_t test_vadd_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m2_t test_vadd_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.nxv32i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m4_t test_vadd_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m4_t test_vadd_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.nxv64i8.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m8_t test_vadd_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv64i8.i8.i64( [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint8m8_t test_vadd_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.nxv1i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16mf4_t test_vadd_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16mf4_t test_vadd_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.nxv2i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16mf2_t test_vadd_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16mf2_t test_vadd_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.nxv4i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m1_t test_vadd_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m1_t test_vadd_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.nxv8i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m2_t test_vadd_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m2_t test_vadd_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.nxv16i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m4_t test_vadd_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m4_t test_vadd_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.nxv32i16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m8_t test_vadd_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv32i16.i16.i64( [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint16m8_t test_vadd_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.nxv1i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32mf2_t test_vadd_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32mf2_t test_vadd_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.nxv2i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m1_t test_vadd_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m1_t test_vadd_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.nxv4i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m2_t test_vadd_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m2_t test_vadd_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.nxv8i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m4_t test_vadd_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m4_t test_vadd_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.nxv16i32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m8_t test_vadd_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv16i32.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint32m8_t test_vadd_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.nxv1i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m1_t test_vadd_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv1i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m1_t test_vadd_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.nxv2i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m2_t test_vadd_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv2i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m2_t test_vadd_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.nxv4i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m4_t test_vadd_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv4i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m4_t test_vadd_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.nxv8i64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m8_t test_vadd_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vadd(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
-//
 vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, 
op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, 
op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 //
CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m2_t 
test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -// vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vadd(mask, maskedoff, op1, op2, vl); + return vadd(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-//
 vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vadd(mask, maskedoff, op1, op2, vl);
+  return vadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
index c67e269..f46d022 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vand.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vand(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1, op2, vl);
+  return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vand(mask, maskedoff, op1,
op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t 
test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
vuint16m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u64m4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vand_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vand(mask, maskedoff, op1, op2, vl); + return vand(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c index 1695e2b..e5acb28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vasub.c @@ -887,994 +887,906 @@ vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { return vasubu(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t 
op1, vint8mf8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, 
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vasub(mask, maskedoff, op1, op2, vl);
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vasub(mask, maskedoff, op1, op2, vl);
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vasub(mask, maskedoff, op1, op2, vl);
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vasub(mask, maskedoff, op1, op2, vl);
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vasub(mask, maskedoff, op1, op2, vl);
+  return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, 
vint32m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vasub(mask, maskedoff, op1, op2, vl); + return vasub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t 
test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vasubu(mask, 
maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vasubu(mask, maskedoff, op1, op2, vl); + return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vasubu(mask, maskedoff, op1, op2, vl);
+  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vasubu(mask, maskedoff, op1, op2, vl);
+  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vasubu(mask, maskedoff, op1, op2, vl);
+  return vasubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
index 8788f07..5968a74 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vdiv.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vdivu(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vdiv(mask, maskedoff, op1, op2, vl);
+  return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, 
op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vdiv(mask, maskedoff, op1, op2, vl); + return vdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t 
test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, 
vuint32m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vdivu(mask, maskedoff, op1, op2, vl); + return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vdivu(mask, maskedoff, op1, op2, vl);
+ return vdivu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
index 86a7e7049..d0d918c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -95,93 +95,84 @@ vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
return vfabs(op1, vl);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
- return vfabs(mask, maskedoff, op1, vl);
+ return vfabs(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
index 5e0e537..1f6ed0d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfadd.c
@@ -242,246 +242,278 @@ vfloat64m8_t test_vfadd_vv_f64m8 (vfloat64m8_t op1, vfloat64m8_t op2, size_t vl)
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
+//
vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
return vfadd(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16mf4_t test_vfadd_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16mf4_t test_vfadd_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16mf2_t test_vfadd_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16mf2_t test_vfadd_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m1_t test_vfadd_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m1_t test_vfadd_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m2_t test_vfadd_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) {
+ return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m2_t test_vfadd_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t
ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m4_t test_vfadd_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m4_t test_vfadd_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m8_t test_vfadd_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat16m8_t test_vfadd_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
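The f16 tests above fix the `_mt` convention: the signature takes a trailing `uint8_t ta`, yet every call site passes the VE_TAIL_AGNOSTIC constant, which suggests the builtin expects a compile-time policy value. Under that assumption (and assuming a VE_TAIL_UNDISTURBED counterpart with value 0, which this diff does not exercise), a wrapper can still offer a runtime choice by branching between two constant call sites, as in the sketch below; the f32 tests that follow repeat the same pattern.

    #include <riscv_vector.h>
    #include <stdint.h>

    // Forward a runtime tail policy by selecting between two constant call
    // sites; ta follows the _mt test signatures above (nonzero = agnostic).
    // VE_TAIL_UNDISTURBED is an assumed name, not shown in this diff.
    vfloat16m1_t add_mt(vbool16_t mask, vfloat16m1_t maskedoff,
                        vfloat16m1_t op1, vfloat16m1_t op2,
                        size_t vl, uint8_t ta) {
      return ta ? vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC)
                : vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
    }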
-// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat32mf2_t test_vfadd_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat32mf2_t test_vfadd_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat32m1_t test_vfadd_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat32m1_t test_vfadd_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m(
+// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m2_t test_vfadd_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m2_t test_vfadd_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m4_t test_vfadd_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m4_t test_vfadd_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m8_t test_vfadd_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat32m8_t test_vfadd_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m1_t test_vfadd_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m1_t test_vfadd_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m2_t test_vfadd_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m2_t test_vfadd_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m4_t test_vfadd_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] -vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfadd(mask, maskedoff, op1, op2, vl); +// +vfloat64m4_t test_vfadd_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat64m8_t test_vfadd_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m(
+// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
-vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfadd(mask, maskedoff, op1, op2, vl);
+//
+vfloat64m8_t test_vfadd_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
+
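Every masked vfadd in this file now lowers to the same intrinsic plus one extra trailing operand, `i64 1`. As intuition for what that operand selects, here is a scalar reference model; it is an illustration only, not part of the patch, and all names in it are made up.

    #include <stddef.h>

    // Scalar model of a masked vector add with an explicit tail policy.
    // tail_agnostic == 1 mirrors the trailing `i64 1` operand added above.
    void model_vfadd_mask(double *vd, const double *maskedoff,
                          const _Bool *mask, const double *op1,
                          const double *op2, size_t vl, size_t vlmax,
                          int tail_agnostic) {
      for (size_t i = 0; i < vl; ++i)      // body: masked merge with maskedoff
        vd[i] = mask[i] ? op1[i] + op2[i] : maskedoff[i];
      if (!tail_agnostic)                  // tail undisturbed: keep old values
        for (size_t i = vl; i < vlmax; ++i)
          vd[i] = maskedoff[i];
      // tail agnostic: vd[vl..vlmax) may hold any value, so nothing to do.
    }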
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
index bd50cad..20534fe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -545,598 +545,544 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
   return vfcvt_f(src, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfcvt_x(mask, maskedoff, src, vl);
+  return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfcvt_rtz_x(mask, maskedoff, src, vl);
+  return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfcvt_x(mask, maskedoff, src, vl);
+  return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfcvt_rtz_x(mask, maskedoff, src, vl);
+  return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfcvt_x(mask, maskedoff, src, vl);
+  return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfcvt_rtz_x(mask, maskedoff, src, vl);
+  return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfcvt_x(mask, maskedoff, src, vl);
+  return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfcvt_rtz_x(mask, maskedoff, src, vl);
+  return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64(
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); 
+ return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_x(mask, maskedoff, src, vl); + return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x(mask, maskedoff, src, vl); + return vfcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, 
vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_xu(mask, maskedoff, src, vl); + return vfcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu(mask, maskedoff, src, vl); + return vfcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { - return vfcvt_f(mask, maskedoff, src, vl); + return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return vfcvt_f(mask, maskedoff, src, vl);
+  return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return vfcvt_f(mask, maskedoff, src, vl);
+  return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return vfcvt_f(mask, maskedoff, src, vl);
+  return vfcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
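The conversion tests keep their `_m` names; only the trailing policy argument and the `i64 1` operand are new. A caller-side sketch using the overload exercised above (for context: vfcvt_x rounds under the dynamic rounding mode, while the `_rtz_` variants truncate toward zero; the function name here is made up):

    #include <riscv_vector.h>

    // Masked float-to-int conversion with the tail left agnostic, as in the
    // updated tests; inactive lanes are taken from maskedoff.
    vint32m1_t to_int_masked(vbool32_t mask, vint32m1_t maskedoff,
                             vfloat32m1_t src, size_t vl) {
      return vfcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
    }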
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return 
vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfdiv(mask, maskedoff, op1, op2, vl); + return vfdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git 
index c5c516a..801c774 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmax.c
@@ -194,209 +194,191 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmax(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmax(mask, maskedoff, op1, op2, vl);
+  return vfmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
index 46ceaca..c8e3534 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmin.c
@@ -194,209 +194,191 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmin(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmin(mask, maskedoff, op1, op2, vl);
+  return vfmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
index 4b1d3a8..89f2985 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmul.c
@@ -194,209 +194,191 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
   return vfmul(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmul(mask, maskedoff, op1, op2, vl);
+  return vfmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
index ef9f3fe..a85aded 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c
@@ -525,590 +525,538 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
   return vfncvt_rod_f(src, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_xu(mask, maskedoff, src, vl);
+  return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+  return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_xu(mask, maskedoff, src, vl);
+  return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+  return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_xu(mask, maskedoff, src, vl);
+  return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+  return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_xu(mask, maskedoff, src, vl);
+  return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+  return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_xu(mask, maskedoff, src, vl);
+  return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_rtz_xu(mask, maskedoff, src, vl);
+  return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return vfncvt_rtz_x(mask, maskedoff, src, vl);
+  return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return vfncvt_x(mask, maskedoff, src, vl);
+  return vfncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t
vl) { - return vfncvt_rtz_x(mask, maskedoff, src, vl); + return vfncvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_xu(mask, maskedoff, src, vl); + return vfncvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rtz_xu(mask, maskedoff, src, vl); + return vfncvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, 
maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_f(mask, maskedoff, src, vl); + return vfncvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return vfncvt_rod_f(mask, maskedoff, src, vl); + return vfncvt_rod_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c index cbab1f6..022e18e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -95,93 +95,84 @@ vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { return vfneg(op1, vl); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfneg(mask, maskedoff, op1, vl); + return vfneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c index db1e672..406b8e3 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrdiv.c @@ -95,101 +95,92 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfrdiv(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrdiv(mask, maskedoff, op1, op2, vl); + return vfrdiv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c index f044b2e..66fb428 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -95,101 +95,92 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfrec7(op1, vl); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrec7(mask, maskedoff, op1, vl); + return vfrec7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c index b20cd8c..f60e3fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -95,101 +95,92 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfrsqrt7(op1, vl); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfrsqrt7(mask, maskedoff, op1, vl); + return vfrsqrt7(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c index 883ed13..76ae856 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsub.c @@ -95,101 +95,92 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfrsub(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfrsub(mask, maskedoff, op1, op2, vl); + return vfrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c index cb8bf8a..bd531c4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsgnj.c @@ -572,625 +572,571 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfsgnjx(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnj(mask, 
maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnj(mask, maskedoff, op1, op2, vl); + return vfsgnj(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, 
vfloat64m1_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjn(mask, maskedoff, op1, op2, vl); + return vfsgnjn(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - return vfsgnjx(mask, maskedoff, op1, op2, vl); + return vfsgnjx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c index d14f8b7..b1e76d5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1down.c @@ -104,116 +104,107 @@ vfloat64m8_t 
test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, return vfslide1down(src, value, vl); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t 
maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1down(mask, maskedoff, src, value, vl); + return vfslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c index ff45b8a..1136e98 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfslide1up.c @@ -104,111 +104,102 @@ vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, return vfslide1up(src, value, vl); } -// // 
CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { - return vfslide1up(mask, maskedoff, src, value, vl); + return vfslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c index e7c49f3..bdb0b53 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -95,101 +95,92 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfsqrt(op1, vl); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { - return vfsqrt(mask, maskedoff, op1, vl); + return vfsqrt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c index 43a2489..f8c75a7 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsub.c @@ -194,209 +194,191 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfsub(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return vfsub(mask, maskedoff, op1, op2, vl); + return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { - 
return vfsub(mask, maskedoff, op1, op2, vl);
+  return vfsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
index 26f6b81..6c0c516 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwadd.c
@@ -173,186 +173,170 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
   return vfwadd_wf(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfwadd_vv(mask, maskedoff, op1, op2, vl);
+  return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfwadd_vf(mask, maskedoff, op1, op2, vl);
+  return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) {
-  return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+  return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwadd_wv(mask, maskedoff, op1, op2, vl); + return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwadd_wf(mask, maskedoff, op1, op2, vl); + return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwadd_vv(mask, maskedoff, op1, op2, vl); + return vfwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf(mask, maskedoff, op1, op2, vl); + return vfwadd_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return vfwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vfwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
-  return vfwadd_wf(mask, maskedoff, op1, op2, vl);
+  return vfwadd_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
index bea98de..56e0d6c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c
@@ -385,425 +385,387 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
   return vfwcvt_f(src, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_x(mask, maskedoff, src, vl); + return vfwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_x(mask, maskedoff, src, vl); + return vfwcvt_rtz_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t 
test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_xu(mask, maskedoff, src, vl); + return vfwcvt_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu(mask, maskedoff, src, vl); + return vfwcvt_rtz_xu(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return vfwcvt_f(mask, maskedoff, src, vl); + return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfwcvt_f(mask, maskedoff, src, vl);
+  return vfwcvt_f(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
index eb1ba18..5bcde08 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmul.c
@@ -89,94 +89,86 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) {
   return vfwmul(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfwmul(mask, maskedoff, op1, op2, vl);
+  return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfwmul(mask, maskedoff, op1, op2, vl);
+  return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwmul(mask, maskedoff, op1, op2, vl); + return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfwmul(mask, maskedoff, op1, op2, vl);
+  return vfwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
index e20c244..1f0bbe8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwsub.c
@@ -173,186 +173,170 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) {
   return vfwsub_wf(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfwsub_vv(mask, maskedoff, op1, op2, vl);
+  return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+  return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+  return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret
[[TMP0]] // vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf(mask, maskedoff, op1, op2, vl); + return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return vfwsub_wv(mask, maskedoff, op1, op2, vl); + return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf(mask, maskedoff, op1, op2, vl); + return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return vfwsub_vv(mask, maskedoff, op1, op2, vl); + return vfwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, 
size_t vl) {
-  return vfwsub_vf(mask, maskedoff, op1, op2, vl);
+  return vfwsub_vf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) {
-  return vfwsub_wv(mask, maskedoff, op1, op2, vl);
+  return vfwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) {
-  return vfwsub_wf(mask, maskedoff, op1, op2, vl);
+  return vfwsub_wf(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
index 1a069c2..11a4f34 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vle.c
@@ -5,585 +5,532 @@
 #include
-//
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) {
-  return vle8(mask, maskedoff, base, vl);
+  return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
 vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) {
-  return vle8(mask, maskedoff, base, vl);
+  return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]],
i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { - return vle8(mask, maskedoff, base, vl); + return vle8(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { - return vle16(mask, maskedoff, base, vl); + return vle16(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, 
maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { - return vle32(mask, maskedoff, base, vl); + return vle32(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { - return vle64(mask, maskedoff, base, vl); + return vle64(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c index e37ddb0..2d9b6cb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxei.c @@ -2107,2104 +2107,1913 @@ vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_ return vloxei64(base, bindex, vl); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return 
vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, 
bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t 
 vl) {
-  return vloxei16(mask, maskedoff, base, bindex, vl);
+  return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return vloxei16(mask, maskedoff, base, bindex, vl);
+  return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vloxei16(mask, maskedoff, base, bindex, vl);
+  return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
-  return vloxei16(mask, maskedoff, base, bindex, vl);
+  return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
-  return vloxei16(mask, maskedoff, base, bindex, vl);
+  return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32>
[[TMP1]] // vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return 
vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t 
*base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t 
test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); 
} -// // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, 
base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return 
vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // 
vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { - 
return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxei64(mask, maskedoff, base, bindex, vl); + return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { - return vloxei8(mask, maskedoff, base, bindex, vl); + return vloxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { - return vloxei16(mask, maskedoff, base, bindex, vl); + return vloxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxei32(mask, maskedoff, base, bindex, vl); + return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
-  return vloxei32(mask, maskedoff, base, bindex, vl);
+  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
-  return vloxei64(mask, maskedoff, base, bindex, vl);
+  return vloxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
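The pattern in the vloxei tests above is uniform: every masked builtin keeps its
existing arguments and gains one trailing policy argument, which CodeGen forwards
as the extra `i64` operand of the masked IR intrinsic (`i64 1` for tail agnostic).
A minimal caller-side sketch, using only what these tests exercise (the overloaded
masked vloxei32 and VE_TAIL_AGNOSTIC); the function and its name are illustrative,
not part of this patch:

#include <riscv_vector.h>

// Indexed gather of doubles through 32-bit indices (byte offsets).
// Inactive elements come from `maskedoff`; under the tail-agnostic
// policy, elements past vl may hold any value after the load.
static vfloat64m1_t gather_f64(vbool64_t mask, vfloat64m1_t maskedoff,
                               const double *base, vuint32mf2_t bindex,
                               size_t vl) {
  // New final argument selects the tail policy for the masked form.
  return vloxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
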
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
index e5404ad..f3e1de1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg.c
@@ -1,9 +1,10 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
-// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -target-feature +experimental-zvlsseg \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

@@ -12335,1585 +12336,3782 @@ void test_vloxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
   return vloxseg2ei64(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
+void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
   return vloxseg2ei8(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
+void
test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg3ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t 
vl) { +void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); +void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( 
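The unmasked overloaded segment loads in these hunks keep their original argument lists; what changes is the element-type coverage now that the RUN line enables `+experimental-zfh`. A minimal usage sketch matching the `test_vloxseg2ei8_v_f16m1` signature above (the wrapper name and data layout are illustrative, not from the patch):

#include <riscv_vector.h>

// De-interleave scattered {x, y} _Float16 pairs: each byte offset in
// `bindex` selects one pair from `base`, and the two fields land in two
// separate f16m1 registers.
void gather_f16_pairs(vfloat16m1_t *x, vfloat16m1_t *y, const _Float16 *base,
                      vuint8mf2_t bindex, size_t vl) {
  vloxseg2ei8(x, y, base, bindex, vl);  // unmasked form: no tail-policy argument
}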
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* 
[[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
+void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
   return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
+void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
   return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
+void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
   return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg2ei16(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) {
+void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
   return vloxseg2ei16(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) {
+void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
   return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) {
+void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
   return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, base, bindex, vl);
+void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, base, bindex, vl);
+void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg2ei32(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
+void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
   return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
+void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
   return vloxseg2ei32(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
+void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
   return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
+void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
   return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, base, bindex, vl);
+void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, base, bindex, vl);
+void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2(
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 
4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, 
vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> }
@llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, base, bindex, vl);
+void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, base, bindex, vl); +void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); +void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, 
v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t 
*v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8(v0, v1, 
v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t 
bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0
+// ...
+//
+void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
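For context on what these generated tests exercise: each overloaded `vloxsegNeiB` call loads N-element segments at the positions selected by `bindex` and deinterleaves them into the output pointers `v0`..`vN-1`. A minimal usage sketch, assuming `<riscv_vector.h>` with the pre-v1.0 intrinsic spellings used in this tree (`vsetvl_e32m1`, `vle32_v_u32m1`, `vse32_v_f32m1`); the `gather_xy` helper is hypothetical, and per the V spec the index vector holds byte offsets:

```c
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Gather (x, y) pairs from interleaved float data at the byte offsets in
// `offsets`, splitting each two-element segment into xs[] and ys[].
void gather_xy(const float *interleaved, const uint32_t *offsets,
               float *xs, float *ys, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);                    // strip-mine by vl
    vuint32m1_t bindex = vle32_v_u32m1(&offsets[i], vl); // byte offsets
    vfloat32m1_t x, y;
    // Unmasked overload, so the tail is agnostic; resolves to _v_f32m1.
    vloxseg2ei32(&x, &y, interleaved, bindex, vl);
    vse32_v_f32m1(&xs[i], x, vl);
    vse32_v_f32m1(&ys[i], y, vl);
    i += vl;
  }
}
```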
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
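As these tests are from the overloaded-intrinsics directory, the generic spelling in each call resolves on its argument types to the explicit name shown in the corresponding CHECK-LABEL. A small correspondence sketch, assuming both spellings are available from this tree's `<riscv_vector.h>`:

```c
#include <riscv_vector.h>

// Both calls lower to @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64; the
// overloaded name selects the _v_f64m1 variant from the argument types.
void load_pair(vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base,
               vuint8mf8_t bindex, size_t vl) {
  vloxseg2ei8(v0, v1, base, bindex, vl);          // overloaded form
  vloxseg2ei8_v_f64m1(v0, v1, base, bindex, vl);  // explicit form
}
```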
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// ...
+//
+void test_vloxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32(v0,
v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 @@ 
-13926,491 +16124,1289 @@ void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]],
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store 
[[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t 
*v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, 
vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, base, 
bindex, vl); +void test_vloxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: 
@test_vloxseg2ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -14419,13 +17415,13 @@ void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>*
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14436,13 +17432,13 @@ void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14455,13 +17451,13 @@ void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14476,13 +17472,13 @@ void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14499,13 +17495,13 @@ void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t 
maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14524,26 +17520,26 @@ void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf4_m 
(vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14552,13 +17548,13 @@ void test_vloxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14569,13 +17565,13 @@ void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vloxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14588,13 +17584,13 @@ void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14609,13 +17605,13 @@ void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, 
vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14632,13 +17628,13 @@ void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14657,26 +17653,26 @@ void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14685,13 +17681,13 @@ void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14702,13 +17698,13 @@ void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14721,13 +17717,13 @@ void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14742,13 +17738,13 @@ void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, 
vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14765,13 +17761,13 @@ void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14790,26 +17786,26 @@ void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -14818,13 +17814,13 @@ void test_vloxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -14835,13 +17831,13 @@ void test_vloxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -14854,13 +17850,13 @@ void test_vloxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:
ret void // -void test_vloxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14875,13 +17871,13 @@ void test_vloxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14898,13 +17894,13 @@ void test_vloxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14923,26 +17919,26 @@ void test_vloxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t 
*v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14951,13 +17947,13 @@ void test_vloxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
@@ -14968,39 +17964,26 @@ void test_vloxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo
 // CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -15009,13 +17992,13 @@ void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -15026,13 +18009,13 @@ void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t
bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15045,13 +18028,13 @@ void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15066,13 +18049,13 @@ void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15089,13 +18072,13 @@ void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15114,26 +18097,26 @@ void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15142,13 +18125,13 @@ void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15159,13 +18142,13 @@ void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15178,13 +18161,13 @@ void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15199,13 +18182,13 @@ void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15222,13 +18205,13 @@ void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15247,26 +18230,26 @@ void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15275,13 +18258,13 @@ void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15292,13 +18275,13 @@ void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15311,13 +18294,13 @@ void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf2_m 
(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15332,13 +18315,13 @@ void test_vloxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15355,13 +18338,13 @@ void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15380,26 +18363,26 @@ void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15408,13 +18391,13 @@ void test_vloxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -15425,13 +18408,13 @@ void test_vloxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
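
Every hunk in this run follows one mechanical pattern: the masked test gains an `_mt` suffix and a trailing `uint8_t ta` parameter, and the call passes a tail-policy constant that lowers to the extra `i64 1` operand on the `.mask.` intrinsic. A minimal caller sketch mirroring the hunk above; it assumes the VE_TAIL_AGNOSTIC constant used by these tests is exposed through <riscv_vector.h>, and the demo_ function name is illustrative only:

#include <riscv_vector.h>

// Sketch only: drive the overloaded masked segment load with an explicit
// tail policy, exactly as the regenerated test above does. With
// VE_TAIL_AGNOSTIC (lowered as the trailing i64 1), elements past vl may
// hold any value after the load.
void demo_vloxseg4_tail_policy(vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2,
                               vint8m1_t *v3, vbool8_t mask,
                               vint8m1_t off0, vint8m1_t off1,
                               vint8m1_t off2, vint8m1_t off3,
                               const int8_t *base, vuint64m8_t bindex,
                               size_t vl) {
  vloxseg4ei64(v0, v1, v2, v3, mask, off0, off1, off2, off3,
               base, bindex, vl, VE_TAIL_AGNOSTIC);
}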
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -15444,13 +18427,13 @@ void test_vloxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -15465,13 +18448,13 @@ void test_vloxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -15488,13 +18471,13 @@ void test_vloxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi
// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1,
vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15506,1200 +18489,947 @@ void test_vloxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: 
@test_vloxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
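
The i16 hunks that follow exercise the same scheme at a different element width; only the pointee type (i16*), the store alignment (align 2), and the index vector type change. A sketch of the other policy, under the same assumptions as the note earlier: VE_TAIL_UNDISTURBED is inferred from the two policies named in the commit message and does not appear in these hunks, and demo_keep_tail is an illustrative name:

#include <riscv_vector.h>

// Sketch only: request tail-undisturbed, so elements past vl keep the
// values supplied through the maskedoff operands instead of becoming
// undefined. VE_TAIL_UNDISTURBED is an assumed macro paired with
// VE_TAIL_AGNOSTIC (which the CHECK lines show lowering to i64 1).
void demo_keep_tail(vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask,
                    vint16mf4_t off0, vint16mf4_t off1,
                    const int16_t *base, vuint8mf8_t bindex, size_t vl) {
  vloxseg2ei8(v0, v1, mask, off0, off1, base, bindex, vl,
              VE_TAIL_UNDISTURBED);
}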
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, 
vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t 
maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3,
vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); 
} -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, 
vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void 
test_vloxseg2ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, 
vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf4_mt (vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_mt( 
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16708,13 +19438,13 @@ void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16725,13 +19455,13 @@ void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16744,13 +19474,13 @@ void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16765,13 +19495,13 @@ void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0,
 vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16788,13 +19518,13 @@ void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -16813,26 +19543,26 @@ void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -16841,13 +19571,13 @@ void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -16858,13 +19588,13 @@ void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( +// 
CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -16877,13 +19607,13 @@ void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -16898,13 +19628,13 @@ void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vloxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -16921,13 +19651,13 @@ void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -16946,26 +19676,26 @@ void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -16974,13 +19704,13 @@ void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -16991,13 +19721,13 @@ void test_vloxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17010,13 +19740,13 @@ void test_vloxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17031,13 +19761,13 @@ void test_vloxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17054,13 +19784,13 @@ void test_vloxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17079,26 +19809,26 @@ void test_vloxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, 
vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17107,13 +19837,13 @@ void test_vloxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, 
vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17124,39 +19854,39 @@ void test_vloxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t 
maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17165,13 +19895,13 @@ void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17182,13 +19912,13 @@ void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17201,13 +19931,13 @@ void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: 
@test_vloxseg6ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17222,13 +19952,13 @@ void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17245,13 +19975,13 @@ void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, 
vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17270,26 +20000,26 @@ void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17298,13 +20028,13 @@ void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17315,13 +20045,13 @@ void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17334,13 +20064,13 @@ void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17355,13 +20085,13 @@ void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17378,13 +20108,13 @@ void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t 
mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17403,26 +20133,26 @@ void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17431,13 +20161,13 @@ void test_vloxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17448,13 +20178,13 @@ void test_vloxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, 
vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17467,13 +20197,13 @@ void test_vloxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17488,13 +20218,13 @@ void test_vloxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17511,13 +20241,13 @@ void test_vloxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17536,26 +20266,26 @@ void test_vloxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17564,13 +20294,13 @@ void test_vloxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17581,940 +20311,998 @@ void test_vloxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( 
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t 
maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t 
*v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vloxseg4ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_i32m1_mt 
(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, 
const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
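[Editorial note, not part of the generated diff: the `_mt` tests above all pin the policy to VE_TAIL_AGNOSTIC. A minimal, hedged usage sketch of the other policy follows; VE_TAIL_UNDISTURBED is assumed here to be the companion of the VE_TAIL_AGNOSTIC macro used throughout these tests.]

#include <riscv_vector.h>

/* Sketch: keep the tail elements (past vl) equal to the maskedoff
   operands instead of leaving them agnostic. */
void keep_tail(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask,
               vint32mf2_t maskedoff0, vint32mf2_t maskedoff1,
               const int32_t *base, vuint32mf2_t bindex, size_t vl) {
  vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl,
               VE_TAIL_UNDISTURBED); /* assumed counterpart macro */
}
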
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
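[Editorial note, not part of the generated diff: the trailing `i64 1` operand visible in the CHECK lines above is the lowered form of VE_TAIL_AGNOSTIC. A hedged sketch of that correspondence follows; the constant for the undisturbed policy is assumed to be 0.]

#include <riscv_vector.h>

/* Sketch: VE_TAIL_AGNOSTIC becomes a trailing `i64 1` on the masked
   intrinsic, e.g. @llvm.riscv.vloxseg2.mask...(..., i64 [[VL]], i64 1)
   as checked above; the undisturbed policy is assumed to lower to 0. */
void policy_operand(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
                    vint32m1_t maskedoff0, vint32m1_t maskedoff1,
                    const int32_t *base, vuint32m1_t bindex, size_t vl) {
  vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl,
               VE_TAIL_AGNOSTIC);
}
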
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -18523,13 +21311,13 @@ void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma
 // CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -18540,13 +21328,13 @@ void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
 // CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -18559,13 +21347,13 @@ void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
 // CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -18580,13 +21368,13 @@ void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
 // CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -18603,13 +21391,13 @@ void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
 // CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -18628,26 +21416,26 @@ void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t
 // CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
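[Editorial note, not part of the generated diff: the `_mt` test signatures above take a `uint8_t ta` parameter even though the bodies hard-code VE_TAIL_AGNOSTIC. A hedged sketch of the intended use, forwarding the caller's policy, follows; this is an assumption about usage, not something these tests exercise.]

#include <stdint.h>
#include <riscv_vector.h>

/* Sketch: forward a runtime-selected tail policy to the builtin, the way
   the `ta` parameter in the test signatures suggests. */
void forward_policy(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
                    vint32m1_t maskedoff0, vint32m1_t maskedoff1,
                    const int32_t *base, vuint32m1_t bindex, size_t vl,
                    uint8_t ta) {
  vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, ta);
}
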
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -18656,13 +21444,13 @@ void test_vloxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
 // CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -18673,13 +21461,13 @@ void test_vloxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -18692,13 +21480,13 @@ void test_vloxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -18713,13 +21501,13 @@ void test_vloxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -18736,13 +21524,13 @@ void test_vloxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_i32m1_m (vint32m1_t
*v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -18761,1820 +21549,2028 @@ void test_vloxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m( +// 
CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) 
{ + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t 
mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) 
{ - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei16_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] =
extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t 
*v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t 
mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: 
ret void
 //
-void test_vloxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg5ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg6ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_m 
(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, 
vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_m (vint64m2_t *v0, 
vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, 
vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , 
, , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20583,13 +23579,13 @@ void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20600,13 +23596,13 @@ void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20619,13 +23615,13 @@ void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20640,13 +23636,13 @@ void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20663,13 +23659,13 @@ void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -20688,26 +23684,26 @@ void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> }
@llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
@@ -20716,13 +23712,13 @@ void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
 // CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
@@ -20733,13 +23729,13 @@ void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1,
vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -20752,13 +23748,13 @@ void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -20773,13 +23769,13 @@ void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -20796,13 +23792,13 @@ void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + 
return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -20821,26 +23817,26 @@ void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, 
vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -20849,13 +23845,13 @@ void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20866,13 +23862,13 @@ void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t 
*v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -20885,13 +23881,13 @@ void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -20906,13 +23902,13 @@ void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, 
vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -20929,13 +23925,13 @@ void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -20954,26 +23950,26 @@ void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( +// CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -20982,13 +23978,13 @@ void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -20999,13 +23995,13 @@ void test_vloxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }
@llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21018,13 +24014,13 @@ void test_vloxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21039,13 +24035,13 @@ void test_vloxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t 
maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21062,13 +24058,13 @@ void test_vloxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 1 @@ -21087,26 +24083,26 @@ void test_vloxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 @@ -21115,13 +24111,13 @@ void test_vloxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21132,39 +24128,26 @@ void test_vloxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21173,13 +24156,13 @@ void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21190,13 +24173,13 @@ void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21209,13 +24192,13 @@ void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21230,13 +24213,13 @@ void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21253,13 +24236,13 @@ void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t 
*v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21278,26 +24261,26 @@ void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21306,13 +24289,13 @@ void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21323,13 +24306,13 @@ void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t 
*v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21342,13 +24325,13 @@ void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 @@ -21363,13 +24346,13 @@ void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21386,13 +24369,13 @@ void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } 
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21411,26 +24394,26 @@ void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21439,13 +24422,13 @@ void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21456,13 +24439,13 @@ void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, 
const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21475,13 +24458,13 @@ void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21496,13 +24479,13 @@ void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, 
vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21519,13 +24502,13 @@ void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21544,26 +24527,26 @@ void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21572,13 +24555,13 @@ void test_vloxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21589,13 +24572,13 @@ void test_vloxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21608,13 +24591,13 @@ void test_vloxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21629,13 +24612,13 @@ void test_vloxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21652,13 +24635,13 @@ void test_vloxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21677,1193 +24660,1397 @@ void test_vloxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 
1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, 
vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask,
vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
ret void
+//
+void test_vloxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] =
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], *
[[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t 
bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store 
[[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei32_v_u16m1_mt 
(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> }
[[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

[... hunks replacing test_vloxseg7ei64_v_u8m1_m and test_vloxseg8ei64_v_u8m1_m with test_vloxseg4ei32_v_u16m2_mt and test_vloxseg2ei32_v_u16m4_mt elided; they follow the same pattern as the hunk above ...]

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
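For reference, the calling convention these updated tests exercise, shown as a standalone C sketch (not part of the patch). It mirrors test_vloxseg2ei8_v_u32mf2_mt further below; VE_TAIL_AGNOSTIC is assumed to be the tail-policy constant provided by this patch's riscv_vector.h, and the wrapper name is illustrative only:

#include <riscv_vector.h>

/* Masked two-segment indexed load; the trailing argument selects the tail
   policy for elements past vl. VE_TAIL_AGNOSTIC leaves them unspecified;
   an undisturbed policy constant, if provided, would keep them instead.
   Assumption: VE_TAIL_AGNOSTIC comes from this patch's riscv_vector.h. */
void load_two_segments(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask,
                       vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1,
                       const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
  /* Overloaded spelling, as used throughout these tests. */
  vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl,
              VE_TAIL_AGNOSTIC);
}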
[... hunks @@ -22872,13 +26059,13 @@ through @@ -23138,13 +26325,13 @@ elided: the same ei8 -> ei64, "_m" -> "_mt" rewrite applied to test_vloxseg{3..8}ei8_v_u16mf4_m, test_vloxseg{2..8}ei8_v_u16mf2_m, and test_vloxseg{2,3}ei8_v_u16m1_m ...]

@@ -23155,3590 +26342,4034 @@ void test_vloxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
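The type-explicit spelling gains the same trailing argument. A sketch, assuming the non-overloaded builtin is named after the test above (vloxseg5ei64_v_u16m1_mt); that name is inferred from the test naming convention and is not itself shown in this excerpt:

/* Non-overloaded "_mt" spelling; name and availability are assumptions
   inferred from the test names, not confirmed by this diff excerpt. */
void load_five_segments(vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2,
                        vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask,
                        vuint16m1_t m0, vuint16m1_t m1, vuint16m1_t m2,
                        vuint16m1_t m3, vuint16m1_t m4,
                        const uint16_t *base, vuint64m4_t bindex, size_t vl) {
  /* Tail elements past vl are left unspecified (tail agnostic). */
  vloxseg5ei64_v_u16m1_mt(v0, v1, v2, v3, v4, mask, m0, m1, m2, m3, m4,
                          base, bindex, vl, VE_TAIL_AGNOSTIC);
}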
[... the remainder of the hunk adds the analogous "_mt" blocks for test_vloxseg{6,7,8}ei64_v_u16m1 and test_vloxseg{2,3,4}ei64_v_u16m2, then continues with the u32 tests ...]

+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
[... added test_vloxseg{3..6}ei8_v_u32mf2_mt blocks and the rewrites of test_vloxseg{5..8}ei8_v_u16m1_m and test_vloxseg2ei8_v_u16m2_m (replaced by test_vloxseg{7,8}ei8_v_u32mf2_mt and test_vloxseg{2,3,4}ei8_v_u32m1_mt) elided; same pattern throughout ...]

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]],
4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, 
vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t 
*v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, 
vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void 
test_vloxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: 
@test_vloxseg7ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei16_v_u32m2_mt (vuint32m2_t 
*v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t 
vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t 
vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei32_v_u32m1_mt 
(vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 
4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t 
maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t 
maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei16_v_u64m2_mt 
(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, 
vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, 
vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, 
vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask,
vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t 
maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, 
vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: 
ret void
 //
-void test_vloxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue {
<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg3ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+//
+void test_vloxseg4ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg5ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg6ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, 
vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, 
vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] =
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_u32m1_m
-void test_vloxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void 
test_vloxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 
*base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t 
maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t 
*v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4_m 
(vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } 
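Every _mt hunk in this file follows the pattern visible above: the masked test gains an _mt suffix plus a trailing uint8_t ta parameter, and the call forwards a tail-policy constant that surfaces as the extra i64 1 operand on the masked IR intrinsic. As a minimal caller sketch (the helper name and the macro values are assumptions, not part of this patch's test sources: VE_TAIL_AGNOSTIC = 1 matches the i64 1 seen in the CHECK lines, and VE_TAIL_UNDISTURBED = 0 is assumed to be the other encoding):

#include <riscv_vector.h>

/* Assumed policy constants: VE_TAIL_AGNOSTIC = 1 matches the trailing
 * i64 1 operand in the CHECK lines above; VE_TAIL_UNDISTURBED = 0 is
 * an assumption mirroring the policy test added by this patch. */
#define VE_TAIL_UNDISTURBED 0
#define VE_TAIL_AGNOSTIC 1

/* Hypothetical caller: a masked two-field indexed segment load where
 * the tail elements of *v0/*v1 are left agnostic. Passing
 * VE_TAIL_UNDISTURBED instead would keep the maskedoff values in the
 * tail. */
void load_f16_pair(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask,
                   vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1,
                   const _Float16 *base, vuint32m1_t bindex, size_t vl) {
  /* Overloaded masked form: the tail policy is the new last argument. */
  vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl,
               VE_TAIL_AGNOSTIC);
}
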
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const 
uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]],
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const 
_Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, 
vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t 
*v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, 
vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 
3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -26747,13 +30378,13 @@ void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -26764,13 +30395,13 @@ void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -26783,13 +30414,13 @@ void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0,
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -26804,13 +30435,13 @@ void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26827,13 +30458,13 @@ void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -26852,26 +30483,26 @@ void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -26880,13 +30511,13 @@ void test_vloxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -26897,13 +30528,13 @@ void test_vloxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -26916,13 +30547,13 @@ void test_vloxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, 
vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -26937,13 +30568,13 @@ void test_vloxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26960,13 +30591,13 @@ void test_vloxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -26985,26 +30616,26 @@ void test_vloxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
@@ -27013,13 +30644,13 @@ void test_vloxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
 // CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27030,39 +30661,39 @@ void test_vloxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27071,13 +30702,13 @@ void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27088,13 +30719,13 @@ void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, 
vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27107,13 +30738,13 @@ void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27128,13 +30759,13 @@ void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27151,13 +30782,13 @@ void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, 
v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27176,26 +30807,26 @@ void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, 
vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
@@ -27204,13 +30835,13 @@ void test_vloxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
@@ -27221,13 +30852,13 @@ void test_vloxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1,
maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27240,13 +30871,13 @@ void test_vloxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27261,13 +30892,13 @@ void test_vloxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27284,13 +30915,13 @@ void test_vloxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27309,26 +30940,26 @@ void test_vloxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, 
vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27337,13 +30968,13 @@ void test_vloxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27354,39 +30985,39 @@ void test_vloxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + 
return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27395,13 +31026,13 @@ void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27412,13 +31043,13 @@ void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -27431,13 +31062,13 @@ void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -27452,13 +31083,13 @@ void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
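// Every masked test in this file follows the same mechanical pattern: the
// `_m` builtin becomes `_mt`, the C signature gains a trailing `uint8_t ta`
// parameter, and the checked IR call gains a trailing policy operand
// (`i64 1`, i.e. tail agnostic). A minimal caller-side sketch, assuming
// <riscv_vector.h>, the VE_TAIL_AGNOSTIC macro added by this patch, and
// hypothetical locals `mask`, `moff0`, `moff1`, `src`, `index`, and `n`:
//
//   vfloat32mf2_t s0, s1;                        // segment destinations
//   vloxseg2ei32(&s0, &s1, mask, moff0, moff1,   // overloaded masked form
//                src, index, n,                  // base, indices, vl
//                VE_TAIL_AGNOSTIC);              // new tail-policy argument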
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -27475,13 +31106,13 @@ void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -27500,26 +31131,26 @@ void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27528,13 
+31159,13 @@ void test_vloxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27545,13 +31176,13 @@ void test_vloxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , , } [[TMP0]], 1 @@ -27564,13 +31195,13 @@ void test_vloxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27585,13 +31216,13 @@ void test_vloxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27608,13 +31239,13 @@ void test_vloxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27633,26 +31264,26 @@ void test_vloxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, 
vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27661,13 +31292,13 @@ void test_vloxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, 
vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -27678,39 +31309,39 @@ void test_vloxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
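// The constant `i64 1` these tests check is simply VE_TAIL_AGNOSTIC lowered
// onto the intrinsic's new policy operand; the tail-undisturbed case is not
// exercised here. A sketch of the alternative, assuming the patch defines
// VE_TAIL_UNDISTURBED as 0: the generated call would carry `i64 0` instead,
// and tail elements past vl would keep the maskedoff values rather than
// being left unspecified:
//
//   vloxseg2ei32(&s0, &s1, mask, moff0, moff1, src, index, n,
//                VE_TAIL_UNDISTURBED);          // preserve tail elements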
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -27719,13 +31350,13 @@ void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27736,13 +31367,13 @@ void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27755,13 +31386,13 @@ void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -27776,13 +31407,13 @@ void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -27799,13 +31430,13 @@ void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
@@ -27824,26 +31455,26 @@ void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl,
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27852,13 +31483,13 @@ void test_vloxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27869,13 +31500,13 @@ void test_vloxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27888,13 +31519,13 @@ void test_vloxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27909,13 +31540,13 @@ void test_vloxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27932,13 +31563,13 @@ void test_vloxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27957,26 +31588,26 @@ void test_vloxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27985,13 +31616,13 @@ void test_vloxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , } [[TMP0]], 1 @@ -28002,39 +31633,39 @@ void test_vloxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, 
vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
@@ -28043,13 +31674,13 @@ void test_vloxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
@@ -28060,13 +31691,13 @@ void test_vloxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
@@ -28079,13 +31710,13 @@ void test_vloxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
@@ -28100,13 +31731,13 @@ void test_vloxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
 // CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3,
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28123,13 +31754,13 @@ void test_vloxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28148,26 +31779,26 @@ void test_vloxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28176,13 +31807,13 @@ void test_vloxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28193,39 +31824,39 @@ void test_vloxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28234,13 +31865,13 @@ void test_vloxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28251,13 +31882,13 @@ void test_vloxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28270,13 +31901,13 @@ void test_vloxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28291,13 +31922,13 @@ void test_vloxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28314,13 +31945,13 @@ void test_vloxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28339,26 +31970,26 @@ void test_vloxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t 
maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28367,13 +31998,13 @@ void test_vloxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28384,39 +32015,39 @@ void test_vloxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, 
size_t vl, uint8_t ta) { + return vloxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28425,13 +32056,13 @@ void test_vloxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28442,13 +32073,13 @@ void test_vloxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28461,13 +32092,13 @@ void test_vloxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( +// CHECK-RV64-LABEL: 
@test_vloxseg6ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28482,13 +32113,13 @@ void test_vloxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28505,13 +32136,13 @@ void test_vloxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t 
maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28530,26 +32161,26 @@ void test_vloxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28558,13 +32189,13 @@ void test_vloxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28575,39 +32206,39 @@ void test_vloxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t 
bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28616,13 +32247,13 @@ void test_vloxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28633,13 +32264,13 @@ void test_vloxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28652,13 +32283,13 @@ void test_vloxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28673,13 +32304,13 @@ void test_vloxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t 
*v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28696,13 +32327,13 @@ void test_vloxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28721,26 +32352,26 @@ void test_vloxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28749,13 +32380,13 @@ void test_vloxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28766,20 +32397,20 @@ void test_vloxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
index 18b04a2..2b40306 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlse.c
@@ -5,691 +5,638 @@
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse32(mask, maskedoff, base, bstride, vl);
+  return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse32(mask, maskedoff, base, bstride, vl);
+  return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse32(mask, maskedoff, base, bstride, vl);
+  return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse32(mask, maskedoff, base, bstride, vl);
+  return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse32(mask, maskedoff, base, bstride, vl);
+  return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
 vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
 vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
 vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse8(mask, maskedoff, base, bstride, vl);
+  return vlse8(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base, bstride, vl);
+  return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlse16(mask, maskedoff, base,
bstride, vl); + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlse16(mask, maskedoff, base, bstride, vl); + return vlse16(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); + return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); + return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); + return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlse64(mask, maskedoff, base, bstride, vl); + return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); 
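// A minimal usage sketch, not part of the generated checks: every masked vlse
// builtin above passes VE_TAIL_AGNOSTIC, which lowers to the trailing `i64 1`
// policy operand on @llvm.riscv.vlse.mask.*. Assuming VE_TAIL_UNDISTURBED is
// the companion constant for the other policy (tail elements keep the
// maskedoff values after the operation), a caller that needs the tail
// preserved would write:
//
//   vint32m1_t strided_load_keep_tail(vbool32_t mask, vint32m1_t maskedoff,
//                                     const int32_t *base, ptrdiff_t bstride,
//                                     size_t vl) {
//     // Elements past vl keep the values supplied in maskedoff.
//     return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_UNDISTURBED);
//   }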
} -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { - return vlse32(mask, maskedoff, base, bstride, vl); + return vlse32(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t 
test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlse64(mask, maskedoff, base, bstride, vl);
+  return vlse64(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
index 6bc06a8..0b57dfc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlseg.c
@@ -1,54 +1,29 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-zvlsseg \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 #include <riscv_vector.h>
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -57,26 +32,13 @@ void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vi
 // CHECK-RV64-NEXT:
store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -87,28 +49,13 @@ void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -121,30 +68,13 @@ void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -159,32 +89,13 @@ void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, 
size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -201,34 +112,13 @@ void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -247,46 +137,26 @@ void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vi // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
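// A hedged sketch of the new masked segment-load convention (caller code
// assumed for illustration, not from the patch): the _m tests become _mt,
// gain a trailing `uint8_t ta` parameter, and the builtin call takes the tail
// policy as its last argument, lowered to the trailing `i64 1` operand on the
// @llvm.riscv.vlseg*.mask intrinsics. Assuming VE_TAIL_UNDISTURBED is the
// companion constant to VE_TAIL_AGNOSTIC, keeping the tail elements of both
// destination registers would look like:
//
//   void load_two_segments_keep_tail(vint8mf8_t *v0, vint8mf8_t *v1,
//                                    vbool64_t mask, vint8mf8_t off0,
//                                    vint8mf8_t off1, const int8_t *base,
//                                    size_t vl) {
//     // Tail elements of *v0 and *v1 keep the values from off0/off1.
//     vlseg2e8(v0, v1, mask, off0, off1, base, vl, VE_TAIL_UNDISTURBED);
//   }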
-void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -295,26 +165,13 @@ void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vi // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg4e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -325,28 +182,13 @@ void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -359,30 +201,13 @@ void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -397,32 +222,13 @@ void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -439,34 +245,13 @@ void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -485,46 +270,26 @@ void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vi // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return 
vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -533,26 +298,13 @@ void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vi // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -563,28 +315,13 @@ void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, 
v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -597,30 +334,13 @@ void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -635,32 +355,13 @@ void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -677,34 +378,13 @@ void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -723,46 +403,26 @@ void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vi // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -771,26 +431,13 @@ void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -801,28 +448,13 @@ void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -835,30 +467,13 @@ void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -873,32 +488,13 @@ void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -915,34 +511,13 @@ void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -961,46 +536,26 @@ void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1_m (vint8m1_t 
*v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg3e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1009,26 +564,13 @@ void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1039,68 +581,39 @@ void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, 
vint16mf4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1109,26 +622,13 @@ void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV64-LABEL: 
@test_vlseg4e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1139,28 +639,13 @@ void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1173,30 +658,13 @@ void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1211,32 +679,13 @@ void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1253,34 +702,13 @@ void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1299,46 +727,26 @@ void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t 
maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1347,26 +755,13 @@ void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1377,28 +772,13 @@ void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void 
test_vlseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1411,30 +791,13 @@ void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1449,32 +812,13 @@ void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1491,34 +835,13 @@ void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1537,46 +860,26 @@ void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1585,26 +888,13 @@ void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl, uint8_t 
ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1615,28 +905,13 @@ void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1649,30 +924,13 @@ void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1687,32 +945,13 @@ void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1729,34 +968,13 @@ void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1775,46 +993,26 @@ void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, 
maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1823,26 +1021,13 @@ void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vi
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
-  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1853,68 +1038,39 @@ void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) {
-  return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) {
-  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -1923,26 +1079,13 @@ void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
-  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -1953,28 +1096,13 @@ void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
-  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -1987,30 +1115,13 @@ void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
-  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2025,32 +1136,13 @@ void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
-  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2067,34 +1159,13 @@ void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
-  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2113,46 +1184,26 @@ void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
-  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2161,26 +1212,13 @@ void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, v
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
-  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2191,28 +1229,13 @@ void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
-  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2225,30 +1248,13 @@ void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
-  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2263,32 +1269,13 @@ void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
-  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2305,34 +1292,13 @@ void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
-  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2351,46 +1317,26 @@ void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
-  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2399,26 +1345,13 @@ void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, v
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
-  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2429,68 +1362,39 @@ void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
-  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2499,26 +1403,13 @@ void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, v
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
-  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2529,28 +1420,13 @@ void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
-  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2563,30 +1439,13 @@ void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
-  return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2601,32 +1460,13 @@ void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
-  return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2643,34 +1483,13 @@ void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
-  return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2689,46 +1508,26 @@ void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
-  return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg8e64(v0, v1,
v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) { + return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2737,26 +1536,13 @@ void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) { + return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2767,68 +1553,39 @@ void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) { + return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -2837,26 +1594,13 @@ void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask,
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -2867,28 +1611,13 @@ void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -2901,30 +1630,13 @@ void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
-  return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -2939,32 +1651,13 @@ void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
-  return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg7.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2981,34 +1674,13 @@ void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3027,46 +1699,26 @@ void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: 
@test_vlseg2e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3075,26 +1727,13 @@ void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - 
return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3105,28 +1744,13 @@ void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3139,30 +1763,13 @@ void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3177,32 +1784,13 @@ void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3219,34 +1807,13 @@ void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl, 
uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3265,46 +1832,26 @@ void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, 
vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg3.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -3313,26 +1860,13 @@ void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask,
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg4.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -3343,28 +1877,13 @@ void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg5.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -3377,30 +1896,13 @@ void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:
ret void // -void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3415,32 +1917,13 @@ void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: 
@test_vlseg7e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3457,34 +1940,13 @@ void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3503,46 +1965,26 @@ void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t 
*base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3551,26 +1993,13 @@ void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuin // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3581,28 +2010,13 @@ void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbo // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl, uint8_t 
ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3615,30 +2029,13 @@ void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3653,32 +2050,13 @@ void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3695,34 +2073,13 @@ void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3741,46 +2098,26 @@ void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vui // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3789,26 +2126,13 @@ void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuin // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3819,68 +2143,39 @@ void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbo // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t 
maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 @@ -3889,26 +2184,13 @@ void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3919,28 +2201,13 @@ void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3953,30 +2220,13 @@ void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3991,32 +2241,13 @@ void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
-  return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -4033,34 +2264,13 @@ void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
-  return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -4079,46 +2289,26 @@ void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
-  return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
-  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -4127,26 +2317,13 @@ void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t ma
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
-  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]],
[[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4157,28 +2334,13 @@ void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4191,30 +2353,13 @@ void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4229,32 +2374,13 @@ void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4271,34 +2397,13 @@ void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4317,46 +2422,26 @@ void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4365,26 +2450,13 @@ void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4395,28 +2467,13 @@ void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4429,30 +2486,13 @@ void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16(v0, v1, v2, 
v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -4467,32 +2507,13 @@ void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4509,34 +2530,13 @@ void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4555,46 +2555,26 @@ void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, 
vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4603,26 +2583,13 @@ void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4633,68 +2600,39 @@ void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( -// 
CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4703,26 +2641,13 @@ void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4733,28 +2658,13 @@ void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -4767,30 +2677,13 @@ void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) { 
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -4767,30 +2677,13 @@ void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
-  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -4805,32 +2698,13 @@ void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t
// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) {
-  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]],
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -4847,34 +2721,13 @@ void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -4893,46 +2746,26 @@ void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
maskedoff7, base, vl); +void test_vlseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -4941,26 +2774,13 @@ void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -4971,28 +2791,13 @@ void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { + return 
vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5005,30 +2810,13 @@ void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5043,32 +2831,13 @@ void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5085,34 +2854,13 @@ void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], 
align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5131,46 +2879,26 @@ void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( +// 
CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5179,26 +2907,13 @@ void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5209,68 +2924,39 @@ void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}
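A matching sketch for the segment loads exercised in this file, under the same assumption that a VE_TAIL_UNDISTURBED macro is available alongside the VE_TAIL_AGNOSTIC these tests pass; the wrapper name is hypothetical:

    #include <riscv_vector.h>

    // Masked two-field segment load that preserves the tails of *v0 and
    // *v1: at indices >= vl, each destination keeps the corresponding
    // maskedoff values rather than becoming unspecified.
    void load_pairs_keep_tail(vuint32m1_t *v0, vuint32m1_t *v1,
                              vbool32_t mask, vuint32m1_t off0,
                              vuint32m1_t off1, const uint32_t *base,
                              size_t vl) {
      vlseg2e32(v0, v1, mask, off0, off1, base, vl, VE_TAIL_UNDISTURBED);
    }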
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -5279,26 +2965,13 @@ void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) {
-  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -5309,28 +2982,13 @@ void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) {
-  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) {
+  return
vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5343,30 +3001,13 @@ void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5381,32 +3022,13 @@ void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -5423,34 +3045,13 @@ void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], 
align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -5469,46 +3070,26 @@ void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( +// 
CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
@@ -5517,26 +3098,13 @@ void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask,
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
-  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
@@ -5547,68 +3115,496 @@ void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2
// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
-  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg4.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1,
vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( +void test_vlseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg6e16(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void 
+// +void test_vlseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, 
const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { - return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl, uint8_t ta) { + return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -5617,26 +3613,13 @@ void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, 
vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl, uint8_t ta) { + return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -5647,28 +3630,13 @@ void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl, uint8_t ta) { + return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -5681,30 +3649,13 @@ void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl, uint8_t ta) { + return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -5719,32 +3670,13 @@ void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) { - return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void 
test_vlseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl, uint8_t ta) {
+  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -5761,34 +3693,13 @@ void test_vlseg6e32_v_f32mf2_m (vfloat32mf
// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
-  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl, uint8_t ta) {
+  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -5807,46 +3718,26 @@ void test_vlseg7e32_v_f32mf2_m (vfloat32mf
// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
-  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl, uint8_t ta) {
+  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -5855,26 +3746,13 @@ void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mas
// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
-  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl, uint8_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -5885,28 +3763,13 @@ void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
-  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl, uint8_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -5919,30 +3782,13 @@ void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
-  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl, uint8_t ta) {
+  return vlseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -5957,32 +3803,13 @@ void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
-  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl, uint8_t ta) {
+  return vlseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -5999,34 +3826,13 @@ void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
-  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl, uint8_t ta) {
+  return vlseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6045,46 +3851,26 @@ void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
-  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl, uint8_t ta) {
+  return vlseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6093,26 +3879,13 @@ void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mas
// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
-  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl, uint8_t ta) {
+  return vlseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6123,68 +3896,39 @@ void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t
// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
-  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl, uint8_t ta) {
+  return vlseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
-  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl, uint8_t ta) {
+  return vlseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6193,26 +3937,13 @@ void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mas
// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
-  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl, uint8_t ta) {
+  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6223,28 +3954,13 @@ void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
-  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl, uint8_t ta) {
+  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -6257,30 +3973,13 @@ void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
-  return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl, uint8_t ta) {
+  return vlseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -6295,32 +3994,13 @@ void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
-  return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl, uint8_t ta) {
+  return vlseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -6337,34 +4017,13 @@ void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
-  return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl, uint8_t ta) {
+  return vlseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -6383,46 +4042,26 @@ void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
-  return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl, uint8_t ta) {
+  return vlseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -6431,26 +4070,13 @@ void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mas
// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
-  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl, uint8_t ta) {
+  return vlseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -6461,28 +4087,20 @@ void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
-  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl, uint8_t ta) {
+  return vlseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT:    ret void
//
-void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
-  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl, uint8_t ta) {
+  return vlseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
index 5f8b5db..5dacd8f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
@@ -1,32 +1,16 @@
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
index 5f8b5db..5dacd8f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
@@ -1,32 +1,16 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-zvlsseg \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -35,26 +19,13 @@
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -65,28 +36,13 @@ void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -99,30 +55,13 @@ void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -137,32 +76,13 @@ void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -179,34 +99,13 @@ void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -225,36 +124,13 @@ void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -275,24 +151,13 @@ void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -301,26 +166,13 @@ void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -331,28 +183,13 @@ void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask,
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -365,30 +202,13 @@ void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -403,32 +223,13 @@ void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -445,34 +246,13 @@ void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -491,36 +271,13 @@ void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -541,24 +298,13 @@ void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -567,26 +313,13 @@ void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -597,28 +330,13 @@ void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
 // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -631,30 +349,13 @@ void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -669,32 +370,13 @@ void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -711,34 +393,13 @@ void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -757,36 +418,13 @@ void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -807,24 +445,13 @@ void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_mt(
 // CHECK-RV64-NEXT:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -833,26 +460,13 @@ void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -863,28 +477,13 @@ void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -897,30 +496,13 @@ void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
 // CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-//
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -935,32 +517,13 @@ void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } 
@llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -977,34 +540,13 @@ void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1023,36 +565,13 @@ void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// 
CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1073,24 +592,13 @@ void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1099,26 +607,13 @@ void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1129,28 +624,13 @@ void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1163,24 +643,13 @@ void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1189,24 +658,13 @@ void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1215,26 +673,13 @@ void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* 
[[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1245,28 +690,13 @@ void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1279,30 +709,13 @@ void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// 
CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1317,32 +730,13 @@ void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t 
ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1359,34 +753,13 @@ void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1405,36 +778,13 @@ void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , 
i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1455,24 +805,13 @@ void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* 
[[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1481,26 +820,13 @@ void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1511,28 +837,13 @@ void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, 
size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1545,30 +856,13 @@ void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1583,32 +877,13 @@ void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// 
CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1625,34 +900,13 @@ void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1671,36 +925,13 @@ void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1721,24 +952,13 @@ void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, 
const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -1747,26 +967,13 @@ void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -1777,28 +984,13 @@ void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -1811,30 +1003,13 @@ void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t 
maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -1849,32 +1024,13 @@ void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -1891,34 +1047,13 @@ void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -1937,36 +1072,13 @@ void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -1987,24 +1099,13 @@ void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t 
maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2013,26 +1114,13 @@ void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2043,28 +1131,13 @@ void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2077,24 +1150,13 @@ void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2103,24 +1165,13 @@ void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1 @@ -2129,26 +1180,13 @@ void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1 @@ -2159,28 +1197,13 @@ void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1 @@ -2193,30 +1216,13 @@ void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2231,32 +1237,13 @@ void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// 
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2273,34 +1260,13 @@ void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2319,36 +1285,13 @@ void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -2369,24 +1312,13 @@ void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t 
*new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2395,26 +1327,13 @@ void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2425,28 +1344,13 @@ void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2459,30 +1363,13 @@ void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2497,32 +1384,13 @@ void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, 
vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -2539,34 +1407,13 @@ void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, 
size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -2585,36 +1432,13 @@ void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t 
mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 
0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -2635,24 +1459,13 @@ void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2661,26 +1474,13 @@ void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const 
int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2691,28 +1491,13 @@ void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_i32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 
-// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2725,24 +1510,13 @@ void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_i32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2751,24 +1525,13 @@ void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t 
mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -2777,26 +1540,13 @@ void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m( +// 
CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -2807,28 +1557,13 @@ void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -2841,30 +1576,13 @@ void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 
// CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -2879,32 +1597,13 @@ void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_i64m1_m( -// CHECK-RV32-NEXT: entry: 
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
@@ -2921,34 +1620,13 @@ void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
 // CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
@@ -2967,36 +1645,13 @@ void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
 // CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_i64m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i32 } [[TMP0]], 8
-// CHECK-RV32-NEXT:    store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
@@ -3017,24 +1672,13 @@ void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2,
 // CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
 //
CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3043,26 +1687,13 @@ void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } 
@llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3073,28 +1704,13 @@ void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } 
@llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3107,24 +1723,13 @@ void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3133,24 +1738,13 @@ void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, 
vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3159,26 +1753,13 @@ void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3189,28 +1770,13 @@ void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3223,30 +1789,13 @@ void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return 
vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3261,32 +1810,13 @@ void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3303,34 +1833,13 @@ void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3349,36 +1858,13 @@ void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3399,24 +1885,13 @@ void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t 
maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3425,26 +1900,13 @@ void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -3455,28 +1917,13 @@ void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3489,30 +1936,13 @@ void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3527,32 +1957,13 @@ void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3569,34 +1980,13 @@ void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t 
maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3615,36 +2005,13 @@ void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3665,24 +2032,13 @@ void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -3691,26 +2047,13 @@ void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, 
size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -3721,28 +2064,13 @@ void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store
[[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -3755,30 +2083,13 @@ void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -3793,32 +2104,13 @@ void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
{ , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -3835,34 +2127,13 @@ void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -3881,36 +2152,13 @@ void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8ff_v_u8mf2_mt (vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -3931,24 +2179,13 @@ void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -3957,26 +2194,13 @@ void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -3987,28 +2211,13 @@ void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff(v0, v1, v2, mask,
maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4021,30 +2230,13 @@ void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -4059,32 +2251,13 @@ void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -4101,34 +2274,13 @@ void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -4147,36 +2299,13 @@ void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// 
CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -4197,24 +2326,13 @@ void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -4223,26 +2341,13 @@ void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -4253,28 +2358,13 @@ void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* 
[[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -4287,24 +2377,13 @@ void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) { - return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m( -// CHECK-RV32-NEXT: 
entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
@@ -4313,24 +2392,13 @@ void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e8ff_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
@@ -4339,26 +2407,13 @@ void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t
*v1, vbool2_t mask, vu
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -4369,28 +2424,13 @@ void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -4403,30 +2443,13 @@ void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -4441,32 +2464,13 @@ void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -4483,34 +2487,13 @@ void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -4529,36 +2512,13 @@ void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -4579,24 +2539,13 @@ void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -4605,26 +2554,13 @@ void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -4635,28 +2571,13 @@ void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -4669,30 +2590,13 @@ void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -4707,32 +2611,13 @@ void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -4749,34 +2634,13 @@ void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -4795,36 +2659,13 @@ void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -4845,24 +2686,13 @@ void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -4871,26 +2701,13 @@ void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -4901,28 +2718,13 @@ void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -4935,30 +2737,13 @@ void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+void test_vlseg4e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -4973,32 +2758,13 @@ void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg5e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -5015,34 +2781,13 @@ void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg6e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -5061,36 +2806,13 @@ void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
-}
-
-// CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+void test_vlseg7e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -5111,24 +2833,13 @@ void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+void test_vlseg8e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m(
[... deleted CHECK-RV32 check lines elided ...]
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -5137,26 +2848,13 @@ void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *
// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t
*new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5167,28 +2865,13 @@ void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// 
CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5201,24 +2884,13 @@ void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5227,24 +2899,13 @@ void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, 
vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) { - return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e16ff_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5253,26 +2914,13 @@ void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// 
CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5283,28 +2931,13 @@ void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5317,30 +2950,13 @@ void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t 
*v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -5355,32 +2971,13 @@ void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -5397,34 +2994,13 @@ void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, 
vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -5443,36 +3019,13 @@ void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, 
vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -5493,24 +3046,13 @@ void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5519,26 +3061,13 @@ void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t 
maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5549,28 +3078,13 @@ void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5583,30 +3097,13 @@ void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -5621,32 +3118,13 @@ void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -5663,34 +3141,13 @@ void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -5709,36 +3166,13 @@ void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t 
maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -5759,24 +3193,13 @@ void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5785,26 +3208,13 @@ void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5815,28 +3225,13 @@ void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5849,24 +3244,13 @@ void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg2e32ff_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5875,24 +3259,13 @@ void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -5901,26 +3274,13 @@ void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, 
maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -5931,28 +3291,13 @@ void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: 
ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -5965,30 +3310,13 @@ void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6003,32 +3331,13 @@ void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6045,34 +3354,13 @@ void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6091,36 +3379,13 @@ void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, 
vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6141,24 +3406,13 @@ void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
@@ -6167,26 +3421,13 @@ void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
@@ -6197,28 +3438,13 @@ void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
-  return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void test_vlseg3e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:
[[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6231,24 +3457,13 @@ void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6257,24 +3472,520 @@ void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( +void test_vlseg5e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, 
vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + 
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base,
size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], <vscale x 2 x half> [[MASKEDOFF5:%.*]], <vscale x 2 x half> [[MASKEDOFF6:%.*]], <vscale x 2 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
@@ -6283,26 +3994,13 @@ void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32 } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32 } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32 } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i32 } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
@@ -6313,28 +4011,13 @@ void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_
 // CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
-  return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+void
test_vlseg3e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6347,30 +4030,13 @@ void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6385,32 +4051,13 @@ void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6427,34 +4074,13 @@ void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6473,36 +4099,13 @@ void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } 
[[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -6523,24 +4126,13 @@ void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t 
maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6549,26 +4141,13 @@ void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( +// CHECK-RV64-LABEL: 
@test_vlseg3e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6579,28 +4158,13 @@ void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6613,30 +4177,13 @@ void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* 
[[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -6651,32 +4198,13 @@ void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) { - return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// 
CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -6693,34 +4221,13 @@ void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) { - return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: 
entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -6739,36 +4246,13 @@ void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const 
float *base, size_t *new_vl, size_t vl) { - return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 
@@ -6789,24 +4273,13 @@ void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) { - return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6815,26 +4288,13 @@ void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return 
vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6845,28 +4305,13 @@ void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) { - return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 
[[TMP5]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6879,24 +4324,13 @@ void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) { - return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6905,24 +4339,13 @@ void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t 
*v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) { - return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e32ff_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -6931,26 +4354,13 @@ void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg3e64ff_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -6961,28 +4371,13 @@ void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -6995,30 +4390,13 @@ void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, 
vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store i32 [[TMP6]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7033,32 +4411,13 @@ void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) { - return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store i32 [[TMP7]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg5e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e64ff(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7075,34 +4434,13 @@ void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) { - return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); -} - -// 
CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store i32 [[TMP8]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg6e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e64ff(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -7121,36 +4459,13 @@ void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) { - return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); -} - -// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 8 -// CHECK-RV32-NEXT: store i32 [[TMP9]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +void test_vlseg7e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e64ff(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7171,24 +4486,13 @@ void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) { - return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +void test_vlseg8e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e64ff(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7197,26 +4501,13 @@ void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) { - return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +void test_vlseg2e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store i32 [[TMP4]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7227,28 +4518,13 @@ void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) { - return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +void test_vlseg3e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 4 -// CHECK-RV32-NEXT: store i32 [[TMP5]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7261,24 +4537,13 @@ void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) { - return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +void test_vlseg4e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , i32 } [[TMP0]], 2 -// CHECK-RV32-NEXT: store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7287,6 +4552,7 @@ void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* 
[[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
-  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+void test_vlseg2e64ff_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
index 7658c25..cf23688 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
@@ -1,28 +1,29 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \
-// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -target-feature +experimental-zvlsseg \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 //
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -31,13 +32,13 @@ void test_vlsseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -48,13 +49,13 @@ void test_vlsseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , } [[TMP0]], 1 @@ -67,13 +68,13 @@ void test_vlsseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -88,13 +89,13 @@ void test_vlsseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -111,13 +112,13 @@ void test_vlsseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -136,26 +137,26 @@ void test_vlsseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -164,13 +165,13 @@ void test_vlsseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -181,13 +182,13 @@ void test_vlsseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -200,13 +201,13 @@ void test_vlsseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t 
maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -221,13 +222,13 @@ void test_vlsseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -244,13 +245,13 @@ void test_vlsseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, 
vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -269,26 +270,26 @@ void test_vlsseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -297,13 +298,13 @@ void test_vlsseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , } [[TMP0]], 1 @@ -314,13 +315,13 @@ void test_vlsseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -333,13 +334,13 @@ void test_vlsseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -354,13 +355,13 @@ void test_vlsseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -377,13 +378,13 @@ void test_vlsseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -402,26 +403,26 @@ void test_vlsseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, v // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -430,13 +431,13 @@ void test_vlsseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -447,13 +448,13 @@ void test_vlsseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -466,13 +467,13 @@ void test_vlsseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -487,13 +488,13 @@ void test_vlsseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -510,13 +511,13 @@ void test_vlsseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
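/* A minimal usage sketch of the masked tail-policy convention exercised by
   the _mt tests in this file. It assumes only <riscv_vector.h> and the
   VE_TAIL_AGNOSTIC macro this patch introduces; demo_vlsseg2e8_tail is a
   hypothetical caller, not part of the generated checks. The trailing policy
   argument is what lowers to the extra `i64 1` operand in the CHECK lines;
   the tail-undisturbed policy would be expected to lower to `i64 0`. */
void demo_vlsseg2e8_tail(vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask,
                         vint8m1_t maskedoff0, vint8m1_t maskedoff1,
                         const int8_t *base, ptrdiff_t bstride, size_t vl) {
  // Masked strided segment load: inactive elements take the maskedoff
  // values, and the tail elements are left agnostic because
  // VE_TAIL_AGNOSTIC is passed as the new tail-policy argument.
  vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl,
            VE_TAIL_AGNOSTIC);
}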
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -535,26 +536,26 @@ void test_vlsseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -563,13 +564,13 @@ void 
test_vlsseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -580,39 +581,39 @@ void test_vlsseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, 
maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -621,13 +622,13 @@ void test_vlsseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mas
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -638,13 +639,13 @@ void test_vlsseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -657,13 +658,13 @@ void test_vlsseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vlsseg6e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -678,13 +679,13 @@ void test_vlsseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -701,13 +702,13 @@ void test_vlsseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t 
maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -726,26 +727,26 @@ void test_vlsseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -754,13 +755,13 @@ void test_vlsseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mas
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -771,13 +772,13 @@
void test_vlsseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -790,13 +791,13 @@ void test_vlsseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -811,13 +812,13 @@ void test_vlsseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -834,13 +835,13 @@ void test_vlsseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t 
bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -859,26 +860,26 @@ void test_vlsseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t 
maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -887,13 +888,13 @@ void test_vlsseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -904,13 +905,13 @@ void test_vlsseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t 
mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -923,13 +924,13 @@ void test_vlsseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -944,13 +945,13 @@ void test_vlsseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t 
maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -967,13 +968,13 @@ void test_vlsseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -992,26 +993,26 @@ void test_vlsseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1020,13 +1021,13 @@ void test_vlsseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1037,39 +1038,39 @@ void test_vlsseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
@@ -1078,13 +1079,13 @@ void test_vlsseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mas
 // CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void
test_vlsseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1095,13 +1096,13 @@ void test_vlsseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1114,13 +1115,13 @@ void test_vlsseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1135,13 +1136,13 @@ void test_vlsseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1158,13 +1159,13 @@ void test_vlsseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1183,26 +1184,26 @@ void test_vlsseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t 
maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1211,13 +1212,13 @@ void test_vlsseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1228,13 +1229,13 @@ void test_vlsseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1247,13 +1248,13 @@ void test_vlsseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1268,13 +1269,13 @@ void test_vlsseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1291,13 +1292,13 @@ void test_vlsseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1316,26 +1317,26 @@ void test_vlsseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1344,13 +1345,13 @@ void test_vlsseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1361,39 +1362,39 @@ void test_vlsseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m2_m (vint32m2_t *v0, 
vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1402,13 +1403,13 @@ void test_vlsseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1419,13 +1420,13 @@ void test_vlsseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1438,13 +1439,13 @@ void test_vlsseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1459,13 +1460,13 @@ void test_vlsseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t 
bstride, size_t vl, uint8_t ta) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1482,13 +1483,13 @@ void test_vlsseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1507,26 +1508,26 @@ void test_vlsseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, // 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1535,13 +1536,13 @@ void test_vlsseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1552,39 +1553,39 @@ void test_vlsseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
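A minimal usage sketch (not generated FileCheck content): the tests pin the policy to VE_TAIL_AGNOSTIC, which is why every updated masked call in the CHECK lines carries the constant policy operand i64 1. A caller that needs the elements past vl preserved passes the complementary policy through the same trailing argument. The VE_TAIL_UNDISTURBED name below is assumed here as the macro for policy value 0, mirroring VE_TAIL_AGNOSTIC; everything else mirrors the test directly above.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical caller of the masked strided segment load. With the
// assumed VE_TAIL_UNDISTURBED policy, the tail elements (indices >= vl)
// of the results are taken from maskedoff0/maskedoff1 instead of being
// left in an unspecified (tail-agnostic) state.
void load_segments_keep_tail(vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask,
                             vint64m4_t maskedoff0, vint64m4_t maskedoff1,
                             const int64_t *base, ptrdiff_t bstride,
                             size_t vl) {
  vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl,
             VE_TAIL_UNDISTURBED);
}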
-// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -1593,13 +1594,13 @@ void test_vlsseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask,
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -1610,13 +1611,13 @@ void test_vlsseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -1629,13 +1630,13 @@ void test_vlsseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2
 // CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1650,13 +1651,13 @@ void test_vlsseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1673,13 +1674,13 @@ void test_vlsseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1698,26 +1699,26 @@ void test_vlsseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1726,13 +1727,13 @@ void test_vlsseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1743,13 +1744,13 @@ void test_vlsseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1762,13 +1763,13 @@ void test_vlsseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1783,13 +1784,13 @@ void test_vlsseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -1806,13 +1807,13 @@ void test_vlsseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -1831,26 +1832,26 @@ void test_vlsseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, 
vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1859,13 +1860,13 @@ void test_vlsseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -1876,13 +1877,13 @@ void test_vlsseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -1895,13 +1896,13 @@ void test_vlsseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -1916,13 +1917,13 @@ void test_vlsseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -1939,13 +1940,13 @@ void test_vlsseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -1964,26 +1965,26 @@ void test_vlsseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8*
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -1992,13 +1993,13 @@ void test_vlsseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vui // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2009,13 +2010,13 @@ void test_vlsseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2028,13 +2029,13 @@ void test_vlsseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2049,13 +2050,13 @@ void test_vlsseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2072,13 +2073,13 @@ void test_vlsseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2097,26 +2098,26 @@ void test_vlsseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vu // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const 
uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2125,13 +2126,13 @@ void test_vlsseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vui // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t 
maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2142,39 +2143,39 @@ void test_vlsseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2183,13 +2184,13 @@ void test_vlsseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2200,13 +2201,13 @@ void test_vlsseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2219,13 +2220,13 @@ void test_vlsseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2240,13 +2241,13 @@ void test_vlsseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2263,13 +2264,13 @@ void test_vlsseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t 
maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2288,26 +2289,26 @@ void test_vlsseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -2316,13 +2317,13 @@ void test_vlsseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -2333,13 +2334,13 @@ void test_vlsseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t 
maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -2352,13 +2353,13 @@ void test_vlsseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -2373,13 +2374,13 @@ void 
test_vlsseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -2396,13 +2397,13 @@ void test_vlsseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vlsseg8e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -2421,26 +2422,26 @@ void test_vlsseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
@@ -2449,13 +2450,13 @@ void test_vlsseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
@@ -2466,13 +2467,13 @@ void test_vlsseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t
bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2485,13 +2486,13 @@ void test_vlsseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2506,13 +2507,13 @@ void test_vlsseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2529,13 +2530,13 @@ void test_vlsseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2554,26 +2555,26 @@ void test_vlsseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2582,13 +2583,13 @@ void test_vlsseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask,
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2599,39 +2600,39 @@ void test_vlsseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

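// Usage sketch (editor's illustration, not generated test output): the masked
// overloaded form now takes the tail policy as a trailing argument, and
// VE_TAIL_AGNOSTIC is the value every _mt test in this file passes through.
// The wrapper name below is hypothetical; the call shape mirrors the tests.
#include <riscv_vector.h>
void load_pair_tail_agnostic (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask,
                              vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1,
                              const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  // Masked-off elements come from maskedoff0/maskedoff1; tail elements may
  // hold any value afterwards (lowered as the trailing "i64 1" operand in
  // the CHECK lines above).
  vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl,
             VE_TAIL_AGNOSTIC);
}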
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2640,13 +2641,13 @@ void test_vlsseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t m
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2657,13 +2658,13 @@ void test_vlsseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2676,13 +2677,13 @@ void test_vlsseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2697,13 +2698,13 @@ void test_vlsseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2720,13 +2721,13 @@ void test_vlsseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2745,26 +2746,26 @@ void test_vlsseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2773,13 +2774,13 @@ void test_vlsseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2790,13 +2791,13 @@ void test_vlsseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -2809,13 +2810,13 @@ void test_vlsseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -2830,13 +2831,13 @@ void test_vlsseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -2853,13 +2854,13 @@ void test_vlsseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -2878,26 +2879,26 @@ void test_vlsseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

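// Counterpart sketch for keeping the tail (editor's illustration): this assumes
// the header pairs VE_TAIL_AGNOSTIC with a VE_TAIL_UNDISTURBED value that would
// lower to a trailing i64 0 operand; only VE_TAIL_AGNOSTIC appears in the
// generated tests here, and the wrapper name below is hypothetical.
#include <riscv_vector.h>
void load_pair_tail_undisturbed (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask,
                                 vuint32m1_t maskedoff0, vuint32m1_t maskedoff1,
                                 const uint32_t *base, ptrdiff_t bstride, size_t vl) {
  // Same call shape as the _mt tests above, but requesting that tail elements
  // keep the values carried in by the maskedoff operands.
  vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl,
             VE_TAIL_UNDISTURBED);
}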
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2906,13 +2907,13 @@ void test_vlsseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2923,39 +2924,39 @@ void test_vlsseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -2964,13 +2965,13 @@ void test_vlsseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -2981,13 +2982,13 @@ void test_vlsseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -3000,13 +3001,13 @@ void test_vlsseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -3021,13 +3022,13 @@ void test_vlsseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -3044,13 +3045,13 @@ void test_vlsseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -3069,26 +3070,26 @@ void test_vlsseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -3097,13 +3098,13 @@ void test_vlsseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -3114,39 +3115,496 @@ void test_vlsseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlsseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_mt( +// CHECK-RV64-NEXT: 
entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3155,13 +3613,13 @@ void test_vlsseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3172,13 +3630,13 @@ void test_vlsseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3191,13 +3649,13 @@ void test_vlsseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3212,13 +3670,13 @@ void test_vlsseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t 
maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3235,13 +3693,13 @@ void test_vlsseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 1 @@ -3260,26 +3718,26 @@ void test_vlsseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3288,13 +3746,13 @@ void test_vlsseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3305,13 +3763,13 @@ void test_vlsseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3324,13 +3782,13 @@ void test_vlsseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3345,13 +3803,13 @@ void test_vlsseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3368,13 +3826,13 @@ void test_vlsseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3393,26 +3851,26 @@ void test_vlsseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, 
vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3421,13 +3879,13 @@ void test_vlsseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t 
maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3438,39 +3896,39 @@ void test_vlsseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, 
ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3479,13 +3937,13 @@ void test_vlsseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3496,13 +3954,13 @@ void test_vlsseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -3515,13 +3973,13 @@ void test_vlsseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -3536,13 +3994,13 @@ void test_vlsseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -3559,13 +4017,13 @@ void test_vlsseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - 
return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -3584,26 +4042,26 @@ void test_vlsseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -3612,13 +4070,13 @@ void test_vlsseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -3629,20 +4087,20 @@ void 
test_vlsseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t
 // CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlsseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
index 5363c07..07c235e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxei.c
@@ -2106,2103 +2106,1912 @@ vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_
   return vluxei64(base, bindex, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 //
CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t 
test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxei32(mask, maskedoff, base, bindex, vl); + return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); 
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
 vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
 vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
 vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
 vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8>
[[TMP1]] // vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { - return vluxei8(mask, maskedoff, base, bindex, vl); + return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, 
maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxei16(mask, maskedoff, base, bindex, vl); + return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL:
@test_vluxei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m(
// CHECK-RV64-NEXT:
entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxei32(mask, maskedoff, base, bindex, vl);
+ return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
- return vluxei64(mask, maskedoff, base, bindex, vl);
+ return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8(mask, maskedoff, base, bindex, vl);
+ return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxei16(mask, maskedoff, base, bindex, vl);
+ return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
 vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
 vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
 vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
 vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
 vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
 vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
 vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
 vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
 vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
 vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
 vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
 vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
 vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
 vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
 vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
 vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
 vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
 vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
 vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
-  return vluxei8(mask, maskedoff, base, bindex, vl);
+  return vluxei8(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16(mask, maskedoff, base, bindex, vl);
+  return vluxei16(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
-  return vluxei32(mask, maskedoff, base, bindex, vl);
+  return vluxei32(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
 vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
 vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
 vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
-  return vluxei64(mask, maskedoff, base, bindex, vl);
+  return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) { - return vluxei64(mask, maskedoff, base, bindex, vl); + return vluxei64(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c index bc7f7ea..86ec6f8 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg.c @@ -1,9 +1,10 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ -// RUN: -target-feature +experimental-v \ -// RUN: -target-feature +experimental-zvlsseg -disable-O0-optnone -emit-llvm %s \ -// RUN: -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -target-feature +experimental-zvlsseg \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \ +// RUN: | FileCheck --check-prefix=CHECK-RV64 %s #include @@ -12335,1585 +12336,3782 @@ void test_vluxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
+void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
   return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
+void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
   return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] =
extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, 
vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* 
[[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { +void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { +void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( +// 
CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , 
, , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t 
*v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// 
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) {
+void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
   return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { +void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, 
size_t vl) { return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { +void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); +void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
} [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, base, bindex, vl); +void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2 
(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { +void 
test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t 
*v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { +void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* 
[[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
+void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, base, bindex, vl);
+void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base,
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> }
@llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { +void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { +void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, 
vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); +void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: 
store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, v2, base, bindex, vl); } -// 
CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, 
vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { + return 
vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
+ return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg2ei8(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg2ei16(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg2ei32(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64(v0, v1, 
v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
 // CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
@@ -13926,491 +16124,1289 @@ void test_vluxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t
 // CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64(v0, v1, base, bindex, vl);
+}
+
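From this point on the diff adds the new masked `_mt` tests. Each `_mt` builtin takes the mask, one maskedoff operand per destination segment, and a trailing tail-policy argument; in the generated IR this shows up as the extra `i64 1` (tail agnostic) operand on the `.mask.` intrinsics. A minimal caller-side sketch of the shape these tests exercise (illustrative only, not part of the patch; it mirrors the i8mf8 test below and assumes only the overloaded vluxseg2ei8 and the VE_TAIL_AGNOSTIC macro used throughout these tests):

    #include <riscv_vector.h>

    // Hypothetical example: masked indexed load of two interleaved int8
    // segments. Inactive lanes of *v0/*v1 are taken from off0/off1; the
    // final argument selects the tail policy (VE_TAIL_AGNOSTIC lowers to
    // the `i64 1` operand visible in the CHECK lines, the tail-undisturbed
    // policy to its `i64 0` counterpart).
    void load_pairs(vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
                    vint8mf8_t off0, vint8mf8_t off1,
                    const int8_t *base, vuint8mf8_t bindex, size_t vl) {
      vluxseg2ei8(v0, v1, mask, off0, off1, base, bindex, vl,
                  VE_TAIL_AGNOSTIC);
    }
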
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0,
vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf8_mt 
(vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t 
mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, 
vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, 
vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, 
vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0,
vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, base, bindex, vl);
+void test_vluxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, base, bindex, vl);
+void test_vluxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, base, bindex, vl);
+void test_vluxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
+void test_vluxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+void test_vluxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
+void test_vluxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg3ei32(v0, v1, v2, base, bindex, vl);
+void test_vluxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg4ei32(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, base, bindex, vl);
+void test_vluxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, base, bindex, vl);
+void test_vluxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg5ei64(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t
maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void 
test_vluxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, base, 
bindex, vl); +void test_vluxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, base, bindex, vl);
+void test_vluxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -14419,13 +17415,13 @@ void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -14436,13 +17432,13 @@ void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -14455,13 +17451,13 @@ void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2,
// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return
vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14476,13 +17472,13 @@ void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14499,13 +17495,13 @@ void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, 
vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14524,26 +17520,26 @@ void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14552,13 +17548,13 @@ void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14569,13 +17565,13 @@ void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14588,13 +17584,13 @@ void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14609,13 +17605,13 @@ void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14632,13 +17628,13 @@ void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t 
maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14657,26 +17653,26 @@ void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t 
*v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14685,13 +17681,13 @@ void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14702,13 +17698,13 @@ void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf2_mt 
(vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14721,13 +17717,13 @@ void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14742,13 +17738,13 @@ void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, 
vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14765,13 +17761,13 @@ void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14790,26 +17786,26 @@ void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14818,13 +17814,13 @@ void test_vluxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vin // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14835,13 +17831,13 @@ void test_vluxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbo // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -14854,13 +17850,13 @@ void test_vluxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -14875,13 +17871,13 @@ void test_vluxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -14898,13 +17894,13 @@ void test_vluxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -14923,26 +17919,26 @@ void test_vluxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vin // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -14951,13 +17947,13 @@ void test_vluxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vin // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, 
vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -14968,39 +17964,26 @@ void test_vluxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbo // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15009,13 +17992,13 @@ void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15026,13 +18009,13 @@ void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -15045,13 +18028,13 @@ void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15066,13 +18049,13 @@ void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15089,13 +18072,13 @@ void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15114,26 +18097,26 @@ void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15142,13 +18125,13 @@ void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15159,13 +18142,13 @@ void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15178,13 +18161,13 @@ void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15199,13 +18182,13 @@ void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, 
const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15222,13 +18205,13 @@ void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15247,26 +18230,26 @@ void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15275,13 +18258,13 @@ void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15292,13 +18275,13 @@ void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15311,13 +18294,13 @@ void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15332,13 +18315,13 @@ void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15355,13 +18338,13 @@ void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15380,26 +18363,26 @@ void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -15408,13 +18391,13 @@ void test_vluxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vi // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -15425,13 +18408,13 @@ void test_vluxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vb // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -15444,13 +18427,13 @@ void test_vluxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -15465,13 +18448,13 @@ void test_vluxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -15488,13 +18471,13 @@ void test_vluxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -15506,1200 +18489,947 @@ void test_vluxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vi // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, 
vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, 
vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, 
vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const 
int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[The regenerated hunks between this point and the last hunk below repeat the identical mechanical change for every remaining masked vluxseg{2..8}ei{8,16,32,64} test over the i8 and i16 element types and their LMUL variants: each test_..._m function is replaced by a test_..._mt function with a trailing uint8_t ta parameter, the overloaded call passes VE_TAIL_AGNOSTIC as the new final argument, and the expected @llvm.riscv.vluxsegN.mask.* call gains a trailing "i64 1" tail-policy operand.]

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3,
vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t 
*v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, 
vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
<vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16708,13 +19438,13 @@ void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16725,13 +19455,13 @@ void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -16744,13 +19474,13 @@ void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 
, , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -16765,13 +19495,13 @@ void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -16788,13 +19518,13 @@ void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t 
mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -16813,26 +19543,26 @@ void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -16841,13 +19571,13 @@ void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -16858,13 +19588,13 @@ void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, 
vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -16877,13 +19607,13 @@ void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -16898,13 +19628,13 @@ void 
test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -16921,13 +19651,13 @@ void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( +// 
CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -16946,26 +19676,26 @@ void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, 
vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -16974,13 +19704,13 @@ void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -16991,13 +19721,13 @@ void test_vluxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, 
v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17010,13 +19740,13 @@ void test_vluxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17031,13 +19761,13 @@ void test_vluxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17054,13 +19784,13 @@ void test_vluxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17079,26 +19809,26 @@ void test_vluxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17107,13 +19837,13 @@ void test_vluxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17124,39 +19854,39 @@ void test_vluxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -17165,13 +19895,13 @@ void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t m
 // CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void
test_vluxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17182,13 +19912,13 @@ void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17201,13 +19931,13 @@ void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - 
return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17222,13 +19952,13 @@ void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17245,13 +19975,13 @@ void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17270,26 +20000,26 @@ void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, 
vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17298,13 +20028,13 @@ void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_mt( 
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -17315,13 +20045,13 @@ void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
 // CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -17334,13 +20064,13 @@ void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t
 // CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return
vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17355,13 +20085,13 @@ void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17378,13 +20108,13 @@ void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, 
vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17403,26 +20133,26 @@ void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m( 
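// NOTE (editorial aside, not part of the generated test file): a hedged
// usage sketch for the overloaded masked segment loads exercised here. The
// variable declarations are hypothetical; the call shape follows
// test_vluxseg2ei64_v_i16m1_mt just below (mask is a vbool16_t, bindex a
// vuint64m4_t).
//
//   vint16m1_t seg0, seg1;  // output segments, written through the pointers
//   // Inactive (masked-off) lanes come from maskedoff0/maskedoff1; tail
//   // lanes are left unspecified because the policy is tail agnostic.
//   vluxseg2ei64(&seg0, &seg1, mask, maskedoff0, maskedoff1,
//                base, bindex, vl, VE_TAIL_AGNOSTIC);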
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17431,13 +20161,13 @@ void test_vluxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17448,13 +20178,13 @@ void test_vluxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -17467,13 +20197,13 @@ void test_vluxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -17488,13 +20218,13 @@ void test_vluxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -17511,13 +20241,13 @@ void test_vluxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t 
*v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -17536,26 +20266,26 @@ void test_vluxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -17564,13 +20294,13 @@ void test_vluxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -17581,940 +20311,998 @@ void test_vluxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t 
maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, 
vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t 
vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: 
store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg3ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg4ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return
vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t 
*v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t 
maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t 
maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, 
vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_mt( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0 -// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1 -// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2 -// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3 +// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18523,13 +21311,13 @@ void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -18540,13 +21328,13 @@ void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -18559,13 +21347,13 @@ void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -18580,13 +21368,13 @@ void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg7ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -18603,13 +21391,13 @@ void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -18628,26 +21416,26 @@ void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t 
*v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -18656,13 +21444,13 @@ void test_vluxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -18673,13 +21461,13 @@ void test_vluxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -18692,13 +21480,13 @@ void test_vluxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1_m 
-void test_vluxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
@@ -18713,13 +21501,13 @@ void test_vluxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
@@ -18736,13 +21524,13 @@ void test_vluxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
@@ -18761,1820 +21549,2028 @@ void test_vluxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2,
 // CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
 // CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: 
@test_vluxseg6ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], 
* [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg5ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]],
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg3ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
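// Illustrative sketch (editorial): per the commit message, unmasked builtins
// stay tail agnostic, so preserving tail elements means using the masked form
// with an all-ones mask. This assumes vmset_m_b32 (the standard RVV mask-set
// intrinsic) and reuses the overloaded load from the test above; the helper
// name is hypothetical.
static inline void gather3_u8mf4_keep_tail(vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2,
                                           vuint8mf4_t dest0, vuint8mf4_t dest1, vuint8mf4_t dest2,
                                           const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
  // With every element active the load behaves like the unmasked form, but
  // the masked spelling lets us request tail-undisturbed for dest0..dest2.
  vbool32_t all_ones = vmset_m_b32(vl);
  vluxseg3ei8(v0, v1, v2, all_ones, dest0, dest1, dest2, base, bindex, vl, VE_TAIL_UNDISTURBED);
}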
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg5ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t 
mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, 
vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT:    ret void
//
-void test_vluxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT:    ret void
//
-void test_vluxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT:    ret void
//
-void test_vluxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT:    ret void
//
-void test_vluxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_mt(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>*
[[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_m 
(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( 
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t 
maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -20583,13 +23579,13 @@ void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mas
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
@@ -20600,13 +23596,13 @@ void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
@@ -20619,13 +23615,13 @@ void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
@@ -20640,13 +23636,13 @@ void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
@@ -20663,13 +23659,13 @@ void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
@@ -20688,26 +23684,26 @@ void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *
 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
@@ -20716,13 +23712,13 @@ void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mas
 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -20733,13 +23729,13 @@ void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -20752,13 +23748,13 @@ void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -20773,13 +23769,13 @@ void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -20796,13 +23792,13 @@ void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { 
- return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -20821,26 +23817,26 @@ void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -20849,13 +23845,13 @@ void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mas
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
@@ -20866,13 +23862,13 @@ void test_vluxseg3ei8_v_u8mf2_m
(vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -20885,13 +23881,13 @@ void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -20906,13 +23902,13 @@ void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -20929,13 +23925,13 @@ void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t 
bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -20954,26 +23950,26 @@ void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, 
vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -20982,13 +23978,13 @@ void test_vluxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, v
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
@@ -20999,13 +23995,13 @@ void test_vluxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
 // CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2,
vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21018,13 +24014,13 @@ void test_vluxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21039,13 +24035,13 @@ void test_vluxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t 
maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21062,13 +24058,13 @@ void test_vluxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21087,26 +24083,26 @@ void test_vluxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21115,13 +24111,13 @@ void test_vluxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, v // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21132,39 +24128,26 @@ void test_vluxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21173,13 +24156,13 @@ void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t ma
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL:
@test_vluxseg4ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21190,13 +24173,13 @@ void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21209,13 +24192,13 @@ void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21230,13 +24213,13 @@ void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21253,13 +24236,13 @@ void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
@@ -21278,26 +24261,26 @@ void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
+//
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21306,13 +24289,13 @@ void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21323,13 +24306,13 @@ void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21342,13 +24325,13 @@ void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21363,13 +24346,13 @@ void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21386,13 +24369,13 @@ void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t 
*v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21411,26 +24394,26 @@ void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21439,13 +24422,13 @@ void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t ma // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21456,13 +24439,13 @@ void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21475,13 +24458,13 @@ void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21496,13 +24479,13 @@ void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21519,13 +24502,13 @@ void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21544,26 +24527,26 @@ void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vluxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -21572,13 +24555,13 @@ void test_vluxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -21589,13 +24572,13 @@ void test_vluxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t 
vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -21608,13 +24591,13 @@ void test_vluxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -21629,13 +24612,13 @@ void test_vluxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return 
vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -21652,13 +24635,13 @@ void test_vluxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -21677,1193 +24660,1397 @@ void test_vluxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, 
, , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg2ei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t 
*v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t 
bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t 
*v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t 
maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, base, bindex, vl); +void test_vluxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t 
mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei32_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -22872,13 +26059,13 @@ void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
@@ -22889,13 +26076,13 @@ void test_vluxseg3ei8_v_u16mf4
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> }
[[TMP0]], 1 @@ -22908,13 +26095,13 @@ void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -22929,13 +26116,13 @@ void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -22952,13 +26139,13 @@ void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -22977,26 +26164,26 @@ void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -23005,13 +26192,13 @@ void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -23022,13 +26209,13 @@ void test_vluxseg3ei8_v_u16mf2
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
@@ -23041,13 +26228,13 @@ void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2
 // CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t
*v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -23062,13 +26249,13 @@ void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -23085,13 +26272,13 @@ void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -23110,26 +26297,26 @@ void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
@@ -23138,13 +26325,13 @@ void test_vluxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas
 // CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t
maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -23155,3590 +26342,4034 @@ void test_vluxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg6ei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, 
vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, 
size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei64_v_u32mf2_mt
(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
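// ---------------------------------------------------------------------------
// [Editorial note, not part of the patch] In the updated CHECK lines, the
// tail policy chosen in C surfaces as the new trailing `i64 1` operand on the
// masked @llvm.riscv.vluxseg*.mask.* calls. The sketch below records the
// encoding this implies: VE_TAIL_AGNOSTIC == 1 is taken from the pairing
// visible in these tests, while VE_TAIL_UNDISTURBED == 0 is an assumption,
// since only the agnostic policy is exercised in this hunk.
#ifndef VE_TAIL_UNDISTURBED
#define VE_TAIL_UNDISTURBED 0 /* assumed value: tail elements are kept */
#endif
#ifndef VE_TAIL_AGNOSTIC
#define VE_TAIL_AGNOSTIC 1    /* matches the trailing `i64 1` operand above */
#endif
// ---------------------------------------------------------------------------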
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
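// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] A minimal, self-contained
// caller of the overloaded masked form exercised by the tests above, written
// against riscv_vector.h; the wrapper name gather_pair_agnostic is
// hypothetical. The tail policy is simply appended after vl.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void gather_pair_agnostic(vuint32mf2_t *v0, vuint32mf2_t *v1,
                                 vbool64_t mask, vuint32mf2_t off0,
                                 vuint32mf2_t off1, const uint32_t *base,
                                 vuint64m1_t bindex, size_t vl) {
  // Masked indexed segment load; elements past vl may take any value.
  vluxseg2ei64(v0, v1, mask, off0, off1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
// ---------------------------------------------------------------------------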
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 
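// ---------------------------------------------------------------------------
// [Editorial illustration, not part of the patch] These tests pin the policy
// to VE_TAIL_AGNOSTIC even though they accept a `ta` parameter. A caller that
// wants the tail elements of the results preserved would pass the undisturbed
// policy instead; this sketch assumes the VE_TAIL_UNDISTURBED macro noted
// earlier, and the wrapper name gather_pair_keep_tail is hypothetical.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void gather_pair_keep_tail(vuint32m1_t *v0, vuint32m1_t *v1,
                                  vbool32_t mask, vuint32m1_t off0,
                                  vuint32m1_t off1, const uint32_t *base,
                                  vuint64m2_t bindex, size_t vl) {
  // Tail elements of *v0/*v1 keep the values from off0/off1 rather than
  // becoming undefined.
  vluxseg2ei64(v0, v1, mask, off0, off1, base, bindex, vl,
               VE_TAIL_UNDISTURBED);
}
// ---------------------------------------------------------------------------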
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
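// Commentary sketch, not patch content: these _mt tests accept a `uint8_t ta`
// parameter but deliberately pass the VE_TAIL_AGNOSTIC constant through to the
// builtin, which is why every masked call above ends in the constant policy
// operand `i64 1`. A minimal tail-agnostic wrapper for the u64m2 case just
// tested might look like this (names are illustrative):
static inline void load2_u64m2_tail_agnostic(vuint64m2_t *v0, vuint64m2_t *v1,
                                             vbool32_t mask, vuint64m2_t off0,
                                             vuint64m2_t off1,
                                             const uint64_t *base,
                                             vuint8mf4_t bindex, size_t vl) {
  // Same operand order as test_vluxseg2ei8_v_u64m2_mt, minus the unused ta.
  vluxseg2ei8(v0, v1, mask, off0, off1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}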
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, 
vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t 
maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, 
uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
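// Sketch, not patch content: for the same vuint64m2_t segment data, the index
// vector type varies independently of the data type; the ei8 tests earlier use
// vuint8mf4_t indices while the ei16 test just above uses vuint16mf2_t, with
// only the bindex type and the intrinsic suffix changing. The tail-policy
// argument stays the trailing operand in every form. Names below are
// illustrative assumptions.
static inline void gather2_u64m2_by_u16(vuint64m2_t *v0, vuint64m2_t *v1,
                                        vbool32_t mask, vuint64m2_t off0,
                                        vuint64m2_t off1, const uint64_t *base,
                                        vuint16mf2_t bindex, size_t vl) {
  // 16-bit indices select the segments; tail handling is tail-agnostic.
  vluxseg2ei16(v0, v1, mask, off0, off1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}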
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: 
@test_vluxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 
4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>,
, , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t 
maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t 
maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// +void test_vluxseg4ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// 
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void
test_vluxseg8ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 
2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t 
maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg5ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf4_mt 
(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+void test_vluxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t
maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t 
maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vluxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t 
maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t 
*v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, 
vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t 
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +void test_vluxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, 
vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], 
* [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, 
vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ 
-26747,13 +30378,13 @@ void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -26764,13 +30395,13 @@ void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -26783,13 +30414,13 @@ void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -26804,13 +30435,13 @@ void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , 
, , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26827,13 +30458,13 @@ void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -26852,26 +30483,26 @@ void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, 
vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -26880,13 +30511,13 @@ void test_vluxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, 
vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -26897,13 +30528,13 @@ void test_vluxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -26916,13 +30547,13 @@ void test_vluxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_m 
(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -26937,13 +30568,13 @@ void test_vluxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -26960,13 +30591,13 @@ void test_vluxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -26985,26 +30616,26 @@ void test_vluxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27013,13 +30644,13 @@ void test_vluxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t 
maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27030,39 +30661,39 @@ void test_vluxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27071,13 +30702,13 @@ void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27088,13 +30719,13 @@ void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27107,13 +30738,13 @@ void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27128,13 +30759,13 @@ void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27151,13 +30782,13 @@ void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27176,26 +30807,26 @@ void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27204,13 +30835,13 @@ void test_vluxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27221,13 +30852,13 @@ void test_vluxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27240,13 +30871,13 @@ void test_vluxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27261,13 +30892,13 @@ void test_vluxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27284,13 +30915,13 @@ void test_vluxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, 
vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27309,26 +30940,26 @@ void test_vluxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27337,13 +30968,13 @@ void test_vluxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27354,39 +30985,39 @@ void test_vluxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t 
*v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27395,13 +31026,13 @@ void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27412,13 +31043,13 @@ void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg5ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27431,13 +31062,13 @@ void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27452,13 +31083,13 @@ void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27475,13 +31106,13 @@ void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27500,26 +31131,26 @@ void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27528,13 +31159,13 @@ void test_vluxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27545,13 +31176,13 @@ void test_vluxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27564,13 +31195,13 @@ void test_vluxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27585,13 +31216,13 @@ void test_vluxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t 
*v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27608,13 +31239,13 @@ void test_vluxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27633,26 +31264,26 @@ void test_vluxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27661,13 +31292,13 @@ void test_vluxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27678,39 +31309,39 @@ void test_vluxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27719,13 +31350,13 @@ void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl); +void test_vluxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27736,13 +31367,13 @@ void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27755,13 +31386,13 @@ void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, 
vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27776,13 +31407,13 @@ void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -27799,13 +31430,13 @@ void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27824,26 +31455,26 @@ void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void 
test_vluxseg8ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27852,13 +31483,13 @@ void test_vluxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float 
*base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -27869,13 +31500,13 @@ void test_vluxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -27888,13 +31519,13 @@ void test_vluxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f32m1_mt 
(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -27909,13 +31540,13 @@ void test_vluxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, , , } [[TMP0]], 1 @@ -27932,13 +31563,13 @@ void test_vluxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -27957,26 +31588,26 @@ void test_vluxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t 
maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -27985,13 +31616,13 @@ void test_vluxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28002,39 +31633,39 @@ void test_vluxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28043,13 +31674,13 @@ void test_vluxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28060,13 +31691,13 @@ void test_vluxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28079,13 +31710,13 @@ void test_vluxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28100,13 +31731,13 @@ void test_vluxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28123,13 +31754,13 @@ void test_vluxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + 
return vluxseg7ei8(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28148,26 +31779,26 @@ void test_vluxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_ // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, 
vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28176,13 +31807,13 @@ void test_vluxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28193,39 +31824,39 @@ void test_vluxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_ // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); +void test_vluxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28234,13 +31865,13 @@ void test_vluxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28251,13 +31882,13 @@ void test_vluxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28270,13 +31901,13 @@ void test_vluxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28291,13 +31922,13 @@ void test_vluxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, 
const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28314,13 +31945,13 @@ void test_vluxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 1 @@ -28339,26 +31970,26 @@ void test_vluxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 
0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28367,13 +31998,13 @@ void test_vluxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28384,39 +32015,39 @@ void test_vluxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28425,13 +32056,13 @@ void test_vluxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, 
vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28442,13 +32073,13 @@ void test_vluxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28461,13 +32092,13 @@ void test_vluxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, 
bindex, vl); +void test_vluxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28482,13 +32113,13 @@ void test_vluxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28505,13 +32136,13 @@ void test_vluxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28530,26 +32161,26 @@ void test_vluxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, 
vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28558,13 +32189,13 @@ void test_vluxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28575,39 +32206,39 @@ void test_vluxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28616,13 +32247,13 @@ void test_vluxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28633,13 +32264,13 @@ void test_vluxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28652,13 +32283,13 @@ void test_vluxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28673,13 +32304,13 @@ void test_vluxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28696,13 +32327,13 @@ void test_vluxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28721,26 +32352,26 @@ void test_vluxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28749,13 +32380,13 @@ void test_vluxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28766,20 +32397,20 @@ void test_vluxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, 
vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
index 06a6f51..66c0d79 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmax.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmaxu(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmax(mask, maskedoff, op1, op2, vl);
+  return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t
mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmax(mask, maskedoff, op1, op2, vl); + return vmax(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, 
vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vmaxu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t 
mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } 
-// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - 
return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmaxu(mask, maskedoff, op1, op2, vl); + return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vmaxu(mask, maskedoff, op1, op2, vl);
+ return vmaxu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
index 7b78825..da001e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmin.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 return vminu(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmin(mask, maskedoff,
op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, 
vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vmin(mask, maskedoff, op1, op2, vl); + return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m( // 
CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vmin(mask, maskedoff, op1, op2, vl);
+ return vmin(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vminu(mask, maskedoff, op1, op2, vl);
+ return vminu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
index 435ffae..129b8f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul.c
@@ -2204,2202 +2204,1982 @@ vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
 return vmulhsu(op1, op2, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vmul(mask, maskedoff, op1, op2, vl);
+ return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
//
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t 
maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmul(mask, maskedoff, op1, op2, vl); + return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( 
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmul(mask, maskedoff, op1, op2, vl);
+  return vmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+  return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmulh(mask, maskedoff, op1, op2, vl);
+
return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vmulh(mask, maskedoff, op1, op2, vl); + return vmulh(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, 
vuint8m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, 
vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vmulhu(mask, maskedoff, op1, op2, vl); + return vmulhu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t 
test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return 
vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { - return vmulhsu(mask, maskedoff, op1, op2, vl); + return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmulhsu(mask, maskedoff, op1, op2, vl);
+  return vmulhsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
index 29c242a..19fb973 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnclip.c
@@ -619,692 +619,632 @@ vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
   return vnclipu(op1, shift, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return vnclip(mask, maskedoff, op1, shift, vl);
+  return vnclip(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return vnclipu(mask, maskedoff, op1, shift, vl);
+  return vnclipu(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index e7a9510..99d7645 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -304,303 +304,273 @@ vuint32m4_t test_vncvt_x_x_w_u32m4 (vuint64m8_t src, size_t vl) {
   return vncvt_x(src, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vncvt_x_x_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vncvt_x_x_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vncvt_x_x_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vncvt_x_x_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vncvt_x_x_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vncvt_x_x_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vncvt_x_x_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vncvt_x_x_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vncvt_x_x_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vncvt_x_x_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vncvt_x_x_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vncvt_x_x_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vncvt_x_x_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vncvt_x_x_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vncvt_x_x_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vncvt_x_x_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vncvt_x_x_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vncvt_x_x_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vncvt_x_x_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vncvt_x_x_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vncvt_x_x_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vncvt_x_x_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vncvt_x_x_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vncvt_x_x_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vncvt_x_x_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vncvt_x_x_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vncvt_x_x_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vncvt_x_x_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl) {
-  return vncvt_x(mask, maskedoff, src, vl);
+  return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
-
return vncvt_x(mask, maskedoff, src, vl); + return vncvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c index ed5ab71..7bf8609 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c @@ -225,223 +225,201 @@ vint64m8_t test_vneg_v_i64m8 (vint64m8_t op1, size_t vl) { return vneg(op1, vl); } -// // CHECK-RV64-LABEL: @test_vneg_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vneg_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vneg_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vneg_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vneg_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vneg_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vneg_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vneg_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vneg_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vneg_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) { - return vneg(mask, maskedoff, op1, vl); + return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } 
-//
// CHECK-RV64-LABEL: @test_vneg_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vneg_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vneg_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vneg_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vneg_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vneg_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vneg_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vneg_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vneg_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vneg_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vneg_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vneg_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vneg_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vneg_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vneg_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vneg_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vneg_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vneg(mask, maskedoff, op1, vl);
+ return vneg(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index 391fc5c..a558e9c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -445,443 +445,399 @@ vuint64m8_t test_vnot_v_u64m8 (vuint64m8_t op1, size_t vl) {
return vnot(op1, vl);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnot_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnot_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnot_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnot_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnot_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnot_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vnot_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vnot_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vnot_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vnot_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vnot_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vnot_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vnot_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vnot_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vnot_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vnot_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vnot_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vnot_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vnot_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vnot_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vnot_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vnot_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vnot_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vnot_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vnot_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vnot_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vnot_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vnot_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vnot_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf4_t test_vnot_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vnot_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vnot_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vnot_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vnot_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vnot_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vnot_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vnot_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vnot_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vnot_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vnot_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vnot_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vnot_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnot_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
- return vnot(mask, maskedoff, op1, vl);
+ return vnot(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
index e76b076..20278ca 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsra.c
@@ -304,302 +304,272 @@ vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) {
return vnsra(op1, shift, vl);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
- return vnsra(mask, maskedoff, op1, shift, vl);
+ return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]],
i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { - return vnsra(mask, maskedoff, op1, shift, vl); + return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return vnsra(mask, maskedoff, op1, shift, vl);
+  return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return vnsra(mask, maskedoff, op1, shift, vl);
+  return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return vnsra(mask, maskedoff, op1, shift, vl);
+  return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return vnsra(mask, maskedoff, op1, shift, vl);
+  return vnsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
index e478bec..dffe4db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnsrl.c
@@ -304,302 +304,272 @@ vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
   return vnsrl(op1, shift, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return vnsrl(mask, maskedoff, op1, shift, vl);
+  return vnsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
index 6a84e87..cb43f77 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vor.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vor(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vor(mask, maskedoff, op1, op2, vl);
+  return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vor(mask, maskedoff, op1, op2, vl); + return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
- return vor(mask, maskedoff, op1, op2, vl);
+ return vor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
index d791364..6b47207 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrem.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
return vremu(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m2_t test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
- return vrem(mask, maskedoff, op1, op2, vl);
+ return vrem(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2, vl);
+ return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
- return vremu(mask, maskedoff, op1, op2,
vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vremu(mask, maskedoff, op1, op2, vl); + return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vremu(mask, maskedoff, op1, op2, vl);
+  return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vremu(mask, maskedoff, op1, op2, vl);
+  return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vremu(mask, maskedoff, op1, op2, vl);
+  return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vremu(mask, maskedoff, op1, op2, vl);
+  return vremu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
index 04bfd70..0186f68 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrgather.c
@@ -1672,1853 +1672,1696 @@ vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2,
   return vrgatherei16(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) {
-  return vrgather(mask,
maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, 
vint32m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return 
vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vrgather_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) { - return vrgather(mask, maskedoff, op1, index, vl); + return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) {
-  return vrgather(mask, maskedoff, op1, index, vl);
+  return vrgather(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) {
-  return vrgatherei16(mask, maskedoff, op1, op2, vl);
+  return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, 
size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return 
vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, 
maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, 
op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16(mask, maskedoff, op1, op2, vl); + return vrgatherei16(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c index 8ecd01f..e9c97ad 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c @@ -444,442 +444,398 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { return vrsub(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vrsub(mask, maskedoff, op1, op2, vl); + return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
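The hunks above apply one mechanical change to every masked vrgatherei16 test: the overloaded call gains a trailing tail-policy argument, which lowers to the `i64 1` (tail agnostic) operand visible in the new CHECK lines. A minimal caller-side sketch, assuming only the VE_TAIL_AGNOSTIC macro introduced by this series; the function and argument names here are illustrative, not taken from the test suite:

#include <riscv_vector.h>

// Masked gather whose tail elements (past vl) may hold any value
// afterwards. Before this series the argument list ended at vl;
// the trailing policy argument is the only change.
vuint8m1_t gather_masked(vbool8_t mask, vuint8m1_t maskedoff,
                         vuint8m1_t src, vuint16m2_t idx, size_t vl) {
  return vrgatherei16(mask, maskedoff, src, idx, vl, VE_TAIL_AGNOSTIC);
}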
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
index 8ecd01f..e9c97ad 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vrsub.c
@@ -444,442 +444,398 @@ vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vrsub(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vrsub(mask, maskedoff, op1, op2, vl);
+  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
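Every vrsub hunk above makes the same two-line change, always passing the agnostic policy. For callers that do need tail elements preserved, the counterpart policy would be passed instead; a hedged sketch, assuming a VE_TAIL_UNDISTURBED macro from this series (presumably lowering to `i64 0`), which these tests do not exercise:

#include <riscv_vector.h>

// Masked reverse-subtract that asks for tail elements of the result
// to keep their previous values. The VE_TAIL_UNDISTURBED name is an
// assumption; only VE_TAIL_AGNOSTIC / `i64 1` appears in these hunks.
vint32m1_t rsub_keep_tail(vbool32_t mask, vint32m1_t maskedoff,
                          vint32m1_t op1, int32_t op2, size_t vl) {
  return vrsub(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}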
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
index 2763370..f1254d9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsadd.c
@@ -887,994 +887,906 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsaddu(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL:
@test_vsadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsadd(mask, maskedoff, op1, op2, vl); + return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
 vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vsadd(mask, maskedoff, op1, op2, vl);
+  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vsaddu(mask, maskedoff, op1, op2, vl);
+  return vsaddu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
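A minimal call-site sketch of the convention the tests above exercise (not part of the applied diff; the function name is hypothetical). It assumes the VE_TAIL_AGNOSTIC macro used throughout these tests, which lowers to the trailing `i64 1` operand seen in the CHECK lines:

#include <riscv_vector.h>

// Hypothetical example: a masked saturating add where the caller now passes
// the tail policy as the final argument of the masked intrinsic.
vint32m1_t saturating_add_masked(vbool32_t mask, vint32m1_t maskedoff,
                                 vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // VE_TAIL_AGNOSTIC selects the tail-agnostic policy (IR operand i64 1).
  return vsadd(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}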
-mem2reg \ +// RUN: | FileCheck --check-prefix=CHECK-RV64 %s #include -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { +void test_vse8_v_i8mf8 (int8_t *base, vint8mf8_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { +void test_vse8_v_i8mf4 (int8_t *base, vint8mf4_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { +void test_vse8_v_i8mf2 (int8_t *base, vint8mf2_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { +void test_vse8_v_i8m1 (int8_t *base, vint8m1_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { +void test_vse8_v_i8m2 (int8_t *base, vint8m2_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { +void test_vse8_v_i8m4 (int8_t *base, vint8m4_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { +void test_vse8_v_i8m8 (int8_t *base, vint8m8_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { +void test_vse16_v_i16mf4 (int16_t *base, vint16mf4_t value, size_t 
vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { +void test_vse16_v_i16mf2 (int16_t *base, vint16mf2_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { +void test_vse16_v_i16m1 (int16_t *base, vint16m1_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { +void test_vse16_v_i16m2 (int16_t *base, vint16m2_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { +void test_vse16_v_i16m4 (int16_t *base, vint16m4_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { +void test_vse16_v_i16m8 (int16_t *base, vint16m8_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { +void test_vse32_v_i32mf2 (int32_t *base, vint32mf2_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { +void test_vse32_v_i32m1 (int32_t *base, vint32m1_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { +void 
test_vse32_v_i32m2 (int32_t *base, vint32m2_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { +void test_vse32_v_i32m4 (int32_t *base, vint32m4_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { +void test_vse32_v_i32m8 (int32_t *base, vint32m8_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { +void test_vse64_v_i64m1 (int64_t *base, vint64m1_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { +void test_vse64_v_i64m2 (int64_t *base, vint64m2_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { +void test_vse64_v_i64m4 (int64_t *base, vint64m4_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { +void test_vse64_v_i64m8 (int64_t *base, vint64m8_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { +void test_vse8_v_u8mf8 (uint8_t *base, vuint8mf8_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf4(uint8_t *base, 
vuint8mf4_t value, size_t vl) { +void test_vse8_v_u8mf4 (uint8_t *base, vuint8mf4_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { +void test_vse8_v_u8mf2 (uint8_t *base, vuint8mf2_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { +void test_vse8_v_u8m1 (uint8_t *base, vuint8m1_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { +void test_vse8_v_u8m2 (uint8_t *base, vuint8m2_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { +void test_vse8_v_u8m4 (uint8_t *base, vuint8m4_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { +void test_vse8_v_u8m8 (uint8_t *base, vuint8m8_t value, size_t vl) { return vse8(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { +void test_vse16_v_u16mf4 (uint16_t *base, vuint16mf4_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { +void test_vse16_v_u16mf2 (uint16_t *base, vuint16mf2_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { +void test_vse16_v_u16m1 (uint16_t *base, vuint16m1_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { +void test_vse16_v_u16m2 (uint16_t *base, vuint16m2_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { +void test_vse16_v_u16m4 (uint16_t *base, vuint16m4_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { +void test_vse16_v_u16m8 (uint16_t *base, vuint16m8_t value, size_t vl) { return vse16(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { +void test_vse32_v_u32mf2 (uint32_t *base, vuint32mf2_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { +void test_vse32_v_u32m1 (uint32_t *base, vuint32m1_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { +void test_vse32_v_u32m2 (uint32_t *base, vuint32m2_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { +void test_vse32_v_u32m4 (uint32_t *base, vuint32m4_t value, size_t vl) { return vse32(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( 
<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) {
+void test_vse32_v_u32m8 (uint32_t *base, vuint32m8_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) {
+void test_vse64_v_u64m1 (uint64_t *base, vuint64m1_t value, size_t vl) {
   return vse64(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) {
+void test_vse64_v_u64m2 (uint64_t *base, vuint64m2_t value, size_t vl) {
   return vse64(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) {
+void test_vse64_v_u64m4 (uint64_t *base, vuint64m4_t value, size_t vl) {
   return vse64(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) {
+void test_vse64_v_u64m8 (uint64_t *base, vuint64m8_t value, size_t vl) {
   return vse64(base, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
 //
+void test_vse16_v_f16mf4 (_Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf2 (_Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m1 (_Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m2 (_Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m4 (_Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m8 (_Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16(base, value, vl);
+}
+
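The new _Float16 overloads above are exercised only through FileCheck lines. As a minimal usage sketch (not part of the patch; the non-overloaded vle16_v_f16m1 loader and the vsetvl_e16m1 helper are assumed from the same v0.10 intrinsic set, and names may differ by release), a strip-mined copy that drains through the overloaded vse16 looks like:

#include <riscv_vector.h>

void copy_f16(_Float16 *dst, const _Float16 *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = vsetvl_e16m1(n);                      // elements handled this pass
    vfloat16m1_t v = vle16_v_f16m1(src, vl);   // loads are not overloadable (return type only)
    vse16(dst, v, vl);                         // overloaded store, as in the tests above
  }
}
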
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) {
+void test_vse32_v_f32mf2 (float *base, vfloat32mf2_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) {
+void test_vse32_v_f32m1 (float *base, vfloat32m1_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) {
+void test_vse32_v_f32m2 (float *base, vfloat32m2_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) {
+void test_vse32_v_f32m4 (float *base, vfloat32m4_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) {
+void test_vse32_v_f32m8 (float *base, vfloat32m8_t value, size_t vl) {
   return vse32(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) {
+void test_vse64_v_f64m1 (double *base, vfloat64m1_t value, size_t vl) {
   return vse64(base,
value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { +void test_vse64_v_f64m2 (double *base, vfloat64m2_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { +void test_vse64_v_f64m4 (double *base, vfloat64m4_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { +void test_vse64_v_f64m8 (double *base, vfloat64m8_t value, size_t vl) { return vse64(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { +void test_vse8_v_i8mf8_m (vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { +void test_vse8_v_i8mf4_m (vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { +void test_vse8_v_i8mf2_m (vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { +void test_vse8_v_i8m1_m (vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { +void test_vse8_v_i8m2_m (vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { +void test_vse8_v_i8m4_m (vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { +void test_vse8_v_i8m8_m (vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { +void test_vse16_v_i16mf4_m (vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { +void test_vse16_v_i16mf2_m (vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { +void test_vse16_v_i16m1_m (vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { +void test_vse16_v_i16m2_m (vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { +void test_vse16_v_i16m4_m (vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { +void test_vse16_v_i16m8_m (vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { +void test_vse32_v_i32mf2_m (vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { +void test_vse32_v_i32m1_m (vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { +void test_vse32_v_i32m2_m (vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { +void test_vse32_v_i32m4_m (vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { +void test_vse32_v_i32m8_m (vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * 
// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { +void test_vse64_v_i64m1_m (vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { +void test_vse64_v_i64m2_m (vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { +void test_vse64_v_i64m4_m (vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { +void test_vse64_v_i64m8_m (vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { +void test_vse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { +void test_vse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { +void test_vse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { +void test_vse8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { +void test_vse8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { +void test_vse8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { +void test_vse8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) { return vse8(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { +void test_vse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { +void test_vse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { +void test_vse16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { +void test_vse16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { +void test_vse16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { +void test_vse16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) { return vse16(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { +void test_vse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { +void test_vse32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { +void test_vse32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { +void test_vse32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_u32m8_m( // 
CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
+void test_vse32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
   return vse32(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
+void test_vse64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
   return vse64(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
+void test_vse64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
   return vse64(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
+void test_vse64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
   return vse64(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
+void test_vse64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
   return vse64(mask, base, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
 //
+void test_vse16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m8_m (vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16(mask, base, value, vl);
+}
+
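Note that the masked stores above keep their old arity: a store writes only the active lanes and produces no result vector, so there is no register tail to police and, unlike the value-producing intrinsics changed later in this patch, no policy operand is added. A hedged sketch of a masked f16 store (not from the patch; the vmflt_vf_f16m1_b16 compare is assumed from the same intrinsic set):

#include <riscv_vector.h>

void store_negative_lanes(_Float16 *dst, vfloat16m1_t v, size_t vl) {
  // Compare produces the vbool16_t mask consumed by the masked overload.
  vbool16_t neg = vmflt_vf_f16m1_b16(v, (_Float16)0.0, vl);
  vse16(neg, dst, v, vl); // only lanes with neg[i] set are written to dst
}
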
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
+void test_vse32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
   return vse32(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
+void test_vse32_v_f32m1_m (vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
   return vse32(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
+void test_vse32_v_f32m2_m (vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
   return vse32(mask, base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
+void test_vse32_v_f32m4_m (vbool8_t
mask, float *base, vfloat32m4_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { +void test_vse32_v_f32m8_m (vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) { return vse32(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) { +void test_vse64_v_f64m1_m (vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) { +void test_vse64_v_f64m2_m (vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) { +void test_vse64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) { +void test_vse64_v_f64m8_m (vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) { return vse64(mask, base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse1_v_b1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1182,7 +1197,6 @@ void test_vse1_v_b1(uint8_t *base, vbool1_t value, size_t vl) { return vse1(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse1_v_b2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1193,7 +1207,6 @@ void test_vse1_v_b2(uint8_t *base, vbool2_t value, size_t vl) { return vse1(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse1_v_b4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1204,7 +1217,6 @@ void test_vse1_v_b4(uint8_t *base, vbool4_t value, size_t vl) { return vse1(base, value, vl); } -// // CHECK-RV64-LABEL: @test_vse1_v_b8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] 
to <vscale x 8 x i1>*
@@ -1215,7 +1227,6 @@ void test_vse1_v_b8(uint8_t *base, vbool8_t value, size_t vl) {
   return vse1(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse1_v_b16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i1>*
@@ -1226,7 +1237,6 @@ void test_vse1_v_b16(uint8_t *base, vbool16_t value, size_t vl) {
   return vse1(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse1_v_b32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i1>*
@@ -1237,7 +1247,6 @@ void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
   return vse1(base, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vse1_v_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i1>*
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
index 64f8ab7..0cae4f5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -284,310 +284,282 @@ vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
   return vsext_vf2(op1, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
-  return vsext_vf2(mask, maskedoff, op1, vl);
+  return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) {
-  return vsext_vf2(mask, maskedoff, op1, vl);
+  return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) {
-  return vsext_vf2(mask, maskedoff, op1, vl);
+  return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t
op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return vsext_vf8(mask, maskedoff, op1, vl); + return vsext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t 
maskedoff, vint16mf4_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return vsext_vf4(mask, maskedoff, op1, vl); + return vsext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { - return vsext_vf2(mask, maskedoff, op1, vl); + return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) {
- return vsext_vf2(mask, maskedoff, op1, vl);
+ return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) {
- return vsext_vf2(mask, maskedoff, op1, vl);
+ return vsext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
index 1d9d3c5..b4c435b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1down.c
@@ -465,529 +465,485 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
 return vslide1down(src, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vslide1down_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1down(mask, maskedoff, src, value, vl);
+ return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1down(mask, maskedoff, src, value, vl);
+ return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1down(mask, maskedoff, src, value, vl);
+ return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
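// A minimal usage sketch, not part of the patch's generated tests: the masked
// overloads above now take the tail policy as a trailing argument.
// VE_TAIL_AGNOSTIC lowers to the `i64 1` policy operand checked in the IR.
// Per the commit message, tail undisturbed instead keeps the tail elements;
// its enumerator is assumed here to be spelled VE_TAIL_UNDISTURBED, and the
// helper function names below are illustrative only.
#include <riscv_vector.h>

vint8m1_t slide1down_keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t src, int8_t value, size_t vl) {
  // Elements past vl keep the values held by maskedoff.
  return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_UNDISTURBED);
}

vint8m1_t slide1down_drop_tail(vbool8_t mask, vint8m1_t maskedoff,
                               vint8m1_t src, int8_t value, size_t vl) {
  // Elements past vl may hold any value after the operation.
  return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
}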
-// // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vslide1down_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, 
vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); 
} -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, 
vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return 
vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, src, value, vl); + return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { - return vslide1down(mask, maskedoff, 
src, value, vl);
+ return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
- return vslide1down(mask, maskedoff, src, value, vl);
+ return vslide1down(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
index 051d976..2014f21 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslide1up.c
@@ -462,507 +462,463 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
 return vslide1up(src, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
- return vslide1up(mask, maskedoff, src, value, vl);
+ return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
- return vslide1up(mask, maskedoff, src, value, vl);
+ return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
- return vslide1up(mask, maskedoff, src, value, vl);
+ return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { - return vslide1up(mask, maskedoff, src, value, vl); + return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t 
maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
-  return vslide1up(mask, maskedoff, src, value, vl);
+  return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 [[VALUE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
-  return vslide1up(mask, maskedoff, src, value, vl);
+  return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
 }
[The identical two-line change, with VE_TAIL_AGNOSTIC appended to the call and ", i64 1" appended to the checked intrinsic operands, repeats for the remaining masked vslide1up_vx variants: u16mf2, u16m1, u16m2, u16m4, u16m8, u32mf2, u32m1, u32m2, u32m4, u32m8, u64m1, u64m2, u64m4, and u64m8.]
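For reference, every masked test above exercises the same calling pattern: the original argument list is unchanged and a single policy value is appended, and the CHECK lines confirm that VE_TAIL_AGNOSTIC lowers to the trailing "i64 1" operand of the masked intrinsic. A minimal user-side sketch of that pattern, assuming a toolchain carrying this patch (the wrapper name is illustrative, not part of the patch):

#include <riscv_vector.h>

// Minimal sketch, not taken from the patch. Assumes RV64 with the V
// extension and the tail-policy argument added by this change.
vuint16mf4_t slide1up_tail_agnostic(vbool64_t mask, vuint16mf4_t maskedoff,
                                    vuint16mf4_t src, uint16_t value,
                                    size_t vl) {
  // VE_TAIL_AGNOSTIC becomes the trailing "i64 1" operand seen in the
  // CHECK lines above.
  return vslide1up(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
}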
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
index 532ac398..fd129f2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsll.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
   return vsll(op1, shift, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return vsll(mask, maskedoff, op1, shift, vl);
+  return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return vsll(mask, maskedoff, op1, shift, vl);
+  return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
[The same two-line change repeats for each remaining masked vsll test in this hunk (the vv and vx forms for i8mf4 through i64m8 and u8mf8 through u64m8): VE_TAIL_AGNOSTIC is appended to each call, ", i64 1" to each checked @llvm.riscv.vsll.mask.* operand list, and the stray "-//" separator before each block is removed. The final block is shown in full below.]
-//
 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
-  return vsll(mask, maskedoff, op1, shift, vl);
+  return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
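Note that the unmasked overloads in this hunk's context lines keep their old shape; only the masked overloads gain the extra argument, so the two forms now differ in arity as well as in masking. A short sketch of the resulting API split, again assuming a patched toolchain (both wrapper names are illustrative):

#include <riscv_vector.h>

// Illustrative only; mirrors the vsll tests above.
vint8mf8_t shift_unmasked(vint8mf8_t op1, size_t shift, size_t vl) {
  // Unmasked overload: unchanged by this patch, no policy argument.
  return vsll(op1, shift, vl);
}

vint8mf8_t shift_masked(vbool64_t mask, vint8mf8_t maskedoff,
                        vint8mf8_t op1, size_t shift, size_t vl) {
  // Masked overload: the trailing policy argument is now required.
  return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}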
test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsll(mask, maskedoff, op1, shift, vl); + return vsll(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c index e2ae796..a1ebb87 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul.c @@ -444,489 +444,445 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { return vsmul(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t 
test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t 
mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // 
CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsmul(mask, maskedoff, op1, op2, vl); + return vsmul(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vsmul(mask, maskedoff, op1, op2, vl);
+  return vsmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
index 442e107..c2403d8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsoxei.c
@@ -1,4608 +1,4249 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8m1(int8_t *base, vuint8m1_t
bindex, vint8m1_t value, - size_t vl) { +void test_vsoxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value, - size_t vl) { +void test_vsoxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value, - size_t vl) { +void test_vsoxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value, - size_t vl) { +void test_vsoxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, - size_t vl) { +void test_vsoxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, - size_t vl) { +void test_vsoxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value, - size_t vl) { +void test_vsoxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value, - size_t vl) { +void test_vsoxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value, - size_t vl) { +void test_vsoxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value, - size_t vl) { +void test_vsoxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, - size_t vl) { +void test_vsoxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value, - size_t vl) { +void test_vsoxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value, - size_t vl) { +void test_vsoxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value, - size_t vl) { +void test_vsoxei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } 
-// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value, - size_t vl) { +void test_vsoxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value, - size_t vl) { +void test_vsoxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value, - size_t vl) { +void test_vsoxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value, - size_t vl) { +void test_vsoxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value, - size_t vl) { +void test_vsoxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, - size_t vl) { +void test_vsoxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf2(int16_t *base, 
vuint8mf4_t bindex, vint16mf2_t value, - size_t vl) { +void test_vsoxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value, - size_t vl) { +void test_vsoxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value, - size_t vl) { +void test_vsoxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value, - size_t vl) { +void test_vsoxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value, - size_t vl) { +void test_vsoxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex, - vint16mf4_t value, size_t vl) { +void test_vsoxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * 
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value, - size_t vl) { +void test_vsoxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value, - size_t vl) { +void test_vsoxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value, - size_t vl) { +void test_vsoxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value, - size_t vl) { +void test_vsoxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex, - vint16mf4_t value, size_t vl) { +void test_vsoxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value, - size_t vl) { +void test_vsoxei32_v_i16m1 (int16_t *base, 
vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value, - size_t vl) { +void test_vsoxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value, - size_t vl) { +void test_vsoxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex, - vint16mf4_t value, size_t vl) { +void test_vsoxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value, - size_t vl) { +void test_vsoxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value, - size_t vl) { +void test_vsoxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64( 
[[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, - size_t vl) { +void test_vsoxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value, - size_t vl) { +void test_vsoxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value, - size_t vl) { +void test_vsoxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value, - size_t vl) { +void test_vsoxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value, - size_t vl) { +void test_vsoxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsoxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value, - size_t vl) { +void test_vsoxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // 
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i32mf2(int32_t *base, vuint32mf2_t bindex,
-                            vint32mf2_t value, size_t vl) {
+void test_vsoxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex,
-                            vint32mf2_t value, size_t vl) {
+void test_vsoxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i32m2(int32_t *base, vuint64m4_t bindex, vint32m2_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i32m2 (int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsoxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsoxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsoxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
-                         size_t vl) {
+void test_vsoxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
-                         size_t vl) {
+void test_vsoxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
-                         size_t vl) {
+void test_vsoxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
-                         size_t vl) {
+void test_vsoxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
-                           vuint8mf8_t value, size_t vl) {
+void test_vsoxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
-                           vuint8mf4_t value, size_t vl) {
+void test_vsoxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsoxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsoxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
-                          size_t vl) {
+void test_vsoxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
-                          size_t vl) {
+void test_vsoxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
-                           vuint8mf8_t value, size_t vl) {
+void test_vsoxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
-                           size_t vl) {
+void test_vsoxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsoxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsoxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
-                          size_t vl) {
+void test_vsoxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
-                           size_t vl) {
+void test_vsoxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
-                           size_t vl) {
+void test_vsoxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsoxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsoxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
-                           vuint16mf4_t value, size_t vl) {
+void test_vsoxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
-                           vuint16mf2_t value, size_t vl) {
+void test_vsoxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
-                            vuint16mf4_t value, size_t vl) {
+void test_vsoxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
-                            vuint16mf2_t value, size_t vl) {
+void test_vsoxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
-                           vuint16m1_t value, size_t vl) {
+void test_vsoxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
-                           vuint16m2_t value, size_t vl) {
+void test_vsoxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
-                           vuint16m4_t value, size_t vl) {
+void test_vsoxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
-                           vuint16m8_t value, size_t vl) {
+void test_vsoxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
-                            vuint16mf4_t value, size_t vl) {
+void test_vsoxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex,
-                            vuint16mf2_t value, size_t vl) {
+void test_vsoxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex,
-                           vuint16m1_t value, size_t vl) {
+void test_vsoxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex,
-                           vuint16m2_t value, size_t vl) {
+void test_vsoxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex,
-                           vuint16m4_t value, size_t vl) {
+void test_vsoxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex,
-                            vuint16mf4_t value, size_t vl) {
+void test_vsoxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex,
-                            vuint16mf2_t value, size_t vl) {
+void test_vsoxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex,
-                           vuint16m1_t value, size_t vl) {
+void test_vsoxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex,
-                           vuint16m2_t value, size_t vl) {
+void test_vsoxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex,
-                           vuint32mf2_t value, size_t vl) {
+void test_vsoxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex,
-                            vuint32mf2_t value, size_t vl) {
+void test_vsoxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex,
-                           vuint32m1_t value, size_t vl) {
+void test_vsoxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex,
-                           vuint32m2_t value, size_t vl) {
+void test_vsoxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex,
-                           vuint32m4_t value, size_t vl) {
+void test_vsoxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex,
-                           vuint32m8_t value, size_t vl) {
+void test_vsoxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex,
-                            vuint32mf2_t value, size_t vl) {
+void test_vsoxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u32m1(uint32_t *base, vuint32m1_t bindex,
-                           vuint32m1_t value, size_t vl) {
+void test_vsoxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex,
-                           vuint32m2_t value, size_t vl) {
+void test_vsoxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex,
-                           vuint32m4_t value, size_t vl) {
+void test_vsoxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex,
-                           vuint32m8_t value, size_t vl) {
+void test_vsoxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex,
-                            vuint32mf2_t value, size_t vl) {
+void test_vsoxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex,
-                           vuint32m1_t value, size_t vl) {
+void test_vsoxei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex,
-                           vuint32m2_t value, size_t vl) {
+void test_vsoxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex,
-                           vuint32m4_t value, size_t vl) {
+void test_vsoxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value,
-                          size_t vl) {
+void test_vsoxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex,
-                           vuint64m1_t value, size_t vl) {
+void test_vsoxei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex,
-                           vuint64m2_t value, size_t vl) {
+void test_vsoxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex,
-                           vuint64m4_t value, size_t vl) {
+void test_vsoxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex,
-                           vuint64m8_t value, size_t vl) {
+void test_vsoxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
   return vsoxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex,
-                           vuint64m1_t value, size_t vl) {
+void test_vsoxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex,
-                           vuint64m2_t value, size_t vl) {
+void test_vsoxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex,
-                           vuint64m4_t value, size_t vl) {
+void test_vsoxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex,
-                           vuint64m8_t value, size_t vl) {
+void test_vsoxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
   return vsoxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
-                           vuint64m1_t value, size_t vl) {
+void test_vsoxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
-                           vuint64m2_t value, size_t vl) {
+void test_vsoxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
-                           vuint64m4_t value, size_t vl) {
+void test_vsoxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
-                           vuint64m8_t value, size_t vl) {
+void test_vsoxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
-// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
-                           size_t vl) {
+void test_vsoxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
-// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
-                          size_t vl) {
+void test_vsoxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
-// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
-                          size_t vl) {
+void test_vsoxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
-// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
-                          size_t vl) {
+void test_vsoxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
   return vsoxei8(base, bindex, value, vl);
 }
-//
CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value, - size_t vl) { +void test_vsoxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2( +void test_vsoxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32mf2(float *base, vuint16mf4_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsoxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value, - size_t vl) { +void test_vsoxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value, - size_t vl) { +void test_vsoxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t 
vl) { return vsoxei16(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value, - size_t vl) { +void test_vsoxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value, - size_t vl) { +void test_vsoxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2( +void test_vsoxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32mf2(float *base, vuint32mf2_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsoxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value, - size_t vl) { +void 
test_vsoxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value, - size_t vl) { +void test_vsoxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value, - size_t vl) { +void test_vsoxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8( +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value, - size_t vl) { +void test_vsoxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32mf2(float *base, vuint64m1_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsoxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value, - size_t vl) { +void test_vsoxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value, - size_t vl) { +void test_vsoxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value, - size_t vl) { +void test_vsoxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { return vsoxei64(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value, - size_t vl) { +void test_vsoxei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m2(double *base, vuint8mf4_t bindex, 
vfloat64m2_t value, - size_t vl) { +void test_vsoxei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// -// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { + return vsoxei16(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { + return vsoxei32(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { + return vsoxei64(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { + return vsoxei8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value, - size_t vl) { +void test_vsoxei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value, - size_t vl) { +void test_vsoxei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m1(double *base, vuint16mf4_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsoxei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m2(double *base, vuint16mf2_t bindex, - vfloat64m2_t value, size_t vl) { +void test_vsoxei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value, - size_t vl) { +void test_vsoxei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value, - size_t vl) { +void test_vsoxei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m1(double *base, vuint32mf2_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsoxei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value, - size_t vl) { +void test_vsoxei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value, - size_t vl) { +void test_vsoxei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsoxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value, - size_t vl) { +void test_vsoxei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei32(base, bindex, 
value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
-                           size_t vl) {
+void test_vsoxei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
-                           size_t vl) {
+void test_vsoxei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
-                           size_t vl) {
+void test_vsoxei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
-                           size_t vl) {
+void test_vsoxei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
   return vsoxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
-                            vint8mf8_t value, size_t vl) {
+void test_vsoxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
-                            vint8mf4_t value, size_t vl) {
+void test_vsoxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void
@llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex, - vint8mf2_t value, size_t vl) { +void test_vsoxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex, - vint8m1_t value, size_t vl) { +void test_vsoxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex, - vint8m2_t value, size_t vl) { +void test_vsoxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex, - vint8m4_t value, size_t vl) { +void test_vsoxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex, - vint8m8_t value, size_t vl) { +void test_vsoxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex, - vint8mf8_t value, size_t vl) { +void test_vsoxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex, - vint8mf4_t value, size_t vl) { +void test_vsoxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex, - vint8mf2_t value, size_t vl) { +void test_vsoxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex, - vint8m1_t value, size_t vl) { +void test_vsoxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex, - vint8m2_t value, size_t vl) { +void test_vsoxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex, - vint8m4_t value, size_t vl) { +void test_vsoxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex, - vint8mf8_t value, size_t vl) { +void test_vsoxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex, - vint8mf4_t value, size_t vl) { +void test_vsoxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex, - vint8mf2_t value, size_t vl) { +void test_vsoxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex, - vint8m1_t value, size_t vl) { +void test_vsoxei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex, - vint8m2_t value, size_t vl) { +void test_vsoxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex, - vint8mf8_t value, size_t vl) { +void test_vsoxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex, - vint8mf4_t value, size_t vl) { +void test_vsoxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex, - vint8mf2_t value, size_t vl) { +void test_vsoxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex, - vint8m1_t value, size_t vl) { +void test_vsoxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex, - vint16mf4_t value, size_t vl) { +void test_vsoxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex, - vint16m1_t value, size_t vl) { +void test_vsoxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex, - vint16m2_t value, size_t vl) { +void test_vsoxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex, - vint16m4_t value, size_t vl) { +void test_vsoxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex, - vint16m8_t value, size_t vl) { +void test_vsoxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf4_m(vbool64_t mask, int16_t *base, - vuint16mf4_t bindex, vint16mf4_t value, - size_t vl) { +void test_vsoxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16mf2_m(vbool32_t mask, int16_t *base, - vuint16mf2_t bindex, vint16mf2_t value, - size_t vl) { +void test_vsoxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex, - vint16m1_t value, size_t vl) { +void test_vsoxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex, - vint16m2_t value, size_t vl) { +void test_vsoxei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsoxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex, - vint16m4_t value, size_t vl) { +void test_vsoxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex, - vint16m8_t value, size_t vl) { +void test_vsoxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf4_m(vbool64_t mask, int16_t *base, - vuint32mf2_t bindex, vint16mf4_t value, - size_t vl) { +void test_vsoxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex, - vint16m1_t value, size_t vl) { +void test_vsoxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex, - vint16m2_t value, size_t vl) { +void test_vsoxei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) { return 
vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex, - vint16m4_t value, size_t vl) { +void test_vsoxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex, - vint16mf4_t value, size_t vl) { +void test_vsoxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex, - vint16mf2_t value, size_t vl) { +void test_vsoxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, - vint16m1_t value, size_t vl) { +void test_vsoxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, - vint16m2_t value, size_t vl) { +void test_vsoxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsoxei8_v_i32mf2_m (vbool64_t mask, int32_t 
*base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, - vint32m1_t value, size_t vl) { +void test_vsoxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, - vint32m2_t value, size_t vl) { +void test_vsoxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, - vint32m4_t value, size_t vl) { +void test_vsoxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, - vint32m8_t value, size_t vl) { +void test_vsoxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsoxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint16mf4_t bindex, vint32mf2_t value, - size_t vl) { +void test_vsoxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsoxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, - vint32m1_t value, size_t vl) { +void 
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex,
- vint32m2_t value, size_t vl) {
+void test_vsoxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex,
- vint32m4_t value, size_t vl) {
+void test_vsoxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex,
- vint32m8_t value, size_t vl) {
+void test_vsoxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i32mf2_m(vbool64_t mask, int32_t *base,
- vuint32mf2_t bindex, vint32mf2_t value,
- size_t vl) {
+void test_vsoxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex,
- vint32m1_t value, size_t vl) {
+void test_vsoxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex,
- vint32m2_t value, size_t vl) {
+void test_vsoxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex,
- vint32m4_t value, size_t vl) {
+void test_vsoxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex,
- vint32m8_t value, size_t vl) {
+void test_vsoxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex,
- vint32mf2_t value, size_t vl) {
+void test_vsoxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
- vint32m1_t value, size_t vl) {
+void test_vsoxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
- vint32m2_t value, size_t vl) {
+void test_vsoxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
- vint32m4_t value, size_t vl) {
+void test_vsoxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
- vint64m1_t value, size_t vl) {
+void test_vsoxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
- vint64m2_t value, size_t vl) {
+void test_vsoxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
- vint64m4_t value, size_t vl) {
+void test_vsoxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
- vint64m8_t value, size_t vl) {
+void test_vsoxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
- vint64m1_t value, size_t vl) {
+void test_vsoxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
- vint64m2_t value, size_t vl) {
+void test_vsoxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
- vint64m4_t value, size_t vl) {
+void test_vsoxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
- vint64m8_t value, size_t vl) {
+void test_vsoxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
- vint64m1_t value, size_t vl) {
+void test_vsoxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
- vint64m2_t value, size_t vl) {
+void test_vsoxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
- vint64m4_t value, size_t vl) {
+void test_vsoxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
- vint64m8_t value, size_t vl) {
+void test_vsoxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
- vint64m1_t value, size_t vl) {
+void test_vsoxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
- vint64m2_t value, size_t vl) {
+void test_vsoxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
- vint64m4_t value, size_t vl) {
+void test_vsoxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
- vint64m8_t value, size_t vl) {
+void test_vsoxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
- vuint8mf8_t value, size_t vl) {
+void test_vsoxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
- vuint8mf4_t value, size_t vl) {
+void test_vsoxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
- vuint8mf2_t value, size_t vl) {
+void test_vsoxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
- vuint8m1_t value, size_t vl) {
+void test_vsoxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
- vuint8m2_t value, size_t vl) {
+void test_vsoxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
- vuint8m4_t value, size_t vl) {
+void test_vsoxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
- vuint8m8_t value, size_t vl) {
+void test_vsoxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
- vuint8mf8_t value, size_t vl) {
+void test_vsoxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
- vuint8mf4_t value, size_t vl) {
+void test_vsoxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
- vuint8mf2_t value, size_t vl) {
+void test_vsoxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
- vuint8m1_t value, size_t vl) {
+void test_vsoxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
- vuint8m2_t value, size_t vl) {
+void test_vsoxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
- vuint8m4_t value, size_t vl) {
+void test_vsoxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
- vuint8mf8_t value, size_t vl) {
+void test_vsoxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
- vuint8mf4_t value, size_t vl) {
+void test_vsoxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
- vuint8mf2_t value, size_t vl) {
+void test_vsoxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
- vuint8m1_t value, size_t vl) {
+void test_vsoxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
- vuint8m2_t value, size_t vl) {
+void test_vsoxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
- vuint8mf8_t value, size_t vl) {
+void test_vsoxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
- vuint8mf4_t value, size_t vl) {
+void test_vsoxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
- vuint8mf2_t value, size_t vl) {
+void test_vsoxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
- vuint8m1_t value, size_t vl) {
+void test_vsoxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
- vuint16mf4_t value, size_t vl) {
+void test_vsoxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
- vuint16mf2_t value, size_t vl) {
+void test_vsoxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
- vuint16m1_t value, size_t vl) {
+void test_vsoxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
- vuint16m2_t value, size_t vl) {
+void test_vsoxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
- vuint16m4_t value, size_t vl) {
+void test_vsoxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
- vuint16m8_t value, size_t vl) {
+void test_vsoxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
- vuint16mf4_t bindex, vuint16mf4_t value,
- size_t vl) {
+void test_vsoxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
- vuint16mf2_t bindex, vuint16mf2_t value,
- size_t vl) {
+void test_vsoxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
- vuint16m1_t value, size_t vl) {
+void test_vsoxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
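Each overloaded call in these tests resolves to exactly one non-overloaded intrinsic from the value and index operand types. A sketch of the correspondence, using the types of test_vsoxei16_v_u16m1_m above (the helper function name is hypothetical):

```c
#include <riscv_vector.h>

// Two equivalent spellings of the same masked indexed store.
void store_u16m1_both_ways(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
                           vuint16m1_t value, size_t vl) {
  // Overloaded form, as exercised by the tests above.
  vsoxei16(mask, base, bindex, value, vl);
  // Explicit variant name; the compiler selects this one for these types.
  vsoxei16_v_u16m1_m(mask, base, bindex, value, vl);
}
```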
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
- vuint16m2_t value, size_t vl) {
+void test_vsoxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
- vuint16m4_t value, size_t vl) {
+void test_vsoxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
- vuint16m8_t value, size_t vl) {
+void test_vsoxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
- vuint32mf2_t bindex, vuint16mf4_t value,
- size_t vl) {
+void test_vsoxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
- vuint32m1_t bindex, vuint16mf2_t value,
- size_t vl) {
+void test_vsoxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
- vuint16m1_t value, size_t vl) {
+void test_vsoxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
- vuint16m2_t value, size_t vl) {
+void test_vsoxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
- vuint16m4_t value, size_t vl) {
+void test_vsoxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
- vuint64m1_t bindex, vuint16mf4_t value,
- size_t vl) {
+void test_vsoxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
- vuint64m2_t bindex, vuint16mf2_t value,
- size_t vl) {
+void test_vsoxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
- vuint16m1_t value, size_t vl) {
+void test_vsoxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
- vuint16m2_t value, size_t vl) {
+void test_vsoxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
- vuint32mf2_t value, size_t vl) {
+void test_vsoxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
- vuint32m1_t value, size_t vl) {
+void test_vsoxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
- vuint32m2_t value, size_t vl) {
+void test_vsoxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
- vuint32m4_t value, size_t vl) {
+void test_vsoxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
- vuint32m8_t value, size_t vl) {
+void test_vsoxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
- vuint16mf4_t bindex, vuint32mf2_t value,
- size_t vl) {
+void test_vsoxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
- vuint16mf2_t bindex, vuint32m1_t value,
- size_t vl) {
+void test_vsoxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
- vuint32m2_t value, size_t vl) {
+void test_vsoxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
- vuint32m4_t value, size_t vl) {
+void test_vsoxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
- vuint32m8_t value, size_t vl) {
+void test_vsoxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
- vuint32mf2_t bindex, vuint32mf2_t value,
- size_t vl) {
+void test_vsoxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
- vuint32m1_t value, size_t vl) {
+void test_vsoxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
- vuint32m2_t value, size_t vl) {
+void test_vsoxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
- vuint32m4_t value, size_t vl) {
+void test_vsoxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
- vuint32m8_t value, size_t vl) {
+void test_vsoxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
- vuint64m1_t bindex, vuint32mf2_t value,
- size_t vl) {
+void test_vsoxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
- vuint32m1_t value, size_t vl) {
+void test_vsoxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
- vuint32m2_t value, size_t vl) {
+void test_vsoxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
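Throughout these signatures the vboolN_t mask type tracks the ratio SEW/LMUL of the stored data, independent of the index vector's width: u32m8 gives 32/8 = 4 and pairs with vbool4_t, while u32mf2 gives 64 and pairs with vbool64_t. A type-checking sketch using signatures from the u32 tests above (the helper name is hypothetical):

```c
#include <riscv_vector.h>

// Mask type follows SEW/LMUL of the data operand, not the index operand.
void mask_ratio_demo(uint32_t *base, vuint32m8_t ix_a, vuint32m8_t val_a,
                     vbool4_t m_a, vuint64m1_t ix_b, vuint32mf2_t val_b,
                     vbool64_t m_b, size_t vl_a, size_t vl_b) {
  vsoxei32(m_a, base, ix_a, val_a, vl_a);  // as in test_vsoxei32_v_u32m8_m
  vsoxei64(m_b, base, ix_b, val_b, vl_b);  // as in test_vsoxei64_v_u32mf2_m
}
```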
-//
// CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
- vuint32m4_t value, size_t vl) {
+void test_vsoxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
  return vsoxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
- vuint64m1_t value, size_t vl) {
+void test_vsoxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
- vuint64m2_t value, size_t vl) {
+void test_vsoxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
- vuint64m4_t value, size_t vl) {
+void test_vsoxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
- vuint64m8_t value, size_t vl) {
+void test_vsoxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
  return vsoxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
- vuint16mf4_t bindex, vuint64m1_t value,
- size_t vl) {
+void test_vsoxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
- vuint16mf2_t bindex, vuint64m2_t value,
- size_t vl) {
+void test_vsoxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
- vuint64m4_t value, size_t vl) {
+void test_vsoxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
- vuint64m8_t value, size_t vl) {
+void test_vsoxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
  return vsoxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
- vuint32mf2_t bindex, vuint64m1_t value,
- size_t vl) {
+void test_vsoxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
- vuint64m2_t value, size_t vl) {
+void test_vsoxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
  return vsoxei32(mask, base, bindex, value, vl);
}
size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex, - vuint64m4_t value, size_t vl) { +void test_vsoxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex, - vuint64m8_t value, size_t vl) { +void test_vsoxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsoxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex, - vuint64m1_t value, size_t vl) { +void test_vsoxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, - vuint64m2_t value, size_t vl) { +void test_vsoxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, - vuint64m4_t value, size_t vl) { +void test_vsoxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsoxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, - vuint64m8_t value, size_t vl) { +void test_vsoxei64_v_u64m8_m 
(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { + return vsoxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { 
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsoxei16(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsoxei32(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsoxei64(mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsoxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsoxei64(mask, base, bindex, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex,
-                             vfloat32mf2_t value, size_t vl) {
+void test_vsoxei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex,
-                            vfloat32m1_t value, size_t vl) {
+void test_vsoxei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex,
-                            vfloat32m2_t value, size_t vl) {
+void test_vsoxei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex,
-                            vfloat32m4_t value, size_t vl) {
+void test_vsoxei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex,
-                            vfloat32m8_t value, size_t vl) {
+void test_vsoxei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex,
-                              vfloat32mf2_t value, size_t vl) {
+void test_vsoxei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex,
-                             vfloat32m1_t value, size_t vl) {
+void test_vsoxei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex,
-                             vfloat32m2_t value, size_t vl) {
+void test_vsoxei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex,
-                             vfloat32m4_t value, size_t vl) {
+void test_vsoxei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex,
-                             vfloat32m8_t value, size_t vl) {
+void test_vsoxei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex,
-                              vfloat32mf2_t value, size_t vl) {
+void test_vsoxei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex,
-                             vfloat32m1_t value, size_t vl) {
+void test_vsoxei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex,
-                             vfloat32m2_t value, size_t vl) {
+void test_vsoxei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex,
-                             vfloat32m4_t value, size_t vl) {
+void test_vsoxei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex,
-                             vfloat32m8_t value, size_t vl) {
+void test_vsoxei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex,
-                              vfloat32mf2_t value, size_t vl) {
+void test_vsoxei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex,
-                             vfloat32m1_t value, size_t vl) {
+void test_vsoxei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex,
-                             vfloat32m2_t value, size_t vl) {
+void test_vsoxei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex,
-                             vfloat32m4_t value, size_t vl) {
+void test_vsoxei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex,
-                            vfloat64m1_t value, size_t vl) {
+void test_vsoxei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex,
-                            vfloat64m2_t value, size_t vl) {
+void test_vsoxei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex,
-                            vfloat64m4_t value, size_t vl) {
+void test_vsoxei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex,
-                            vfloat64m8_t value, size_t vl) {
+void test_vsoxei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
   return vsoxei8(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex,
-                             vfloat64m1_t value, size_t vl) {
+void test_vsoxei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex,
-                             vfloat64m2_t value, size_t vl) {
+void test_vsoxei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex,
-                             vfloat64m4_t value, size_t vl) {
+void test_vsoxei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex,
-                             vfloat64m8_t value, size_t vl) {
+void test_vsoxei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
   return vsoxei16(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex,
-                             vfloat64m1_t value, size_t vl) {
+void test_vsoxei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex,
-                             vfloat64m2_t value, size_t vl) {
+void test_vsoxei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex,
-                             vfloat64m4_t value, size_t vl) {
+void test_vsoxei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex,
-                             vfloat64m8_t value, size_t vl) {
+void test_vsoxei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
   return vsoxei32(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex,
-                             vfloat64m1_t value, size_t vl) {
+void test_vsoxei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex,
-                             vfloat64m2_t value, size_t vl) {
+void test_vsoxei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex,
-                             vfloat64m4_t value, size_t vl) {
+void test_vsoxei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex,
-                             vfloat64m8_t value, size_t vl) {
+void test_vsoxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
   return vsoxei64(mask, base, bindex, value, vl);
 }
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
index 389f4b0..48e112d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsra.c
@@ -444,442 +444,398 @@ vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
   return vsra(op1, shift, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
-  return vsra(mask, maskedoff, op1, shift, vl);
+  return vsra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
index afc25dc..57d0b89 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsrl.c
@@ -444,442 +444,398 @@ vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
   return vsrl(op1, shift, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
-  return vsrl(mask, maskedoff, op1, shift, vl);
+  return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
-  return vsrl(mask, maskedoff, op1, shift, vl);
+  return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret
[[TMP0]] // vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, 
op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, 
vuint16m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// 
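// A minimal usage sketch, not part of the autogenerated checks: the new
// trailing argument selects the tail policy. These tests always pass
// VE_TAIL_AGNOSTIC; a VE_TAIL_UNDISTURBED counterpart, which would keep the
// tail elements of maskedoff, is assumed below and is not defined by this
// hunk.
//
//   vuint32m1_t keep_tail(vbool32_t mask, vuint32m1_t maskedoff,
//                         vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
//     // Tail elements of the result retain the values from maskedoff.
//     return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_UNDISTURBED);
//   }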
// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { - return vsrl(mask, maskedoff, op1, shift, vl); + return vsrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c index f64b6d1..caf695f 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsse.c @@ -1,1278 +1,1189 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ -// 
RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \ +// RUN: | FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -// CHECK-RV64-LABEL: @test_vsse8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf8(int8_t *base, ptrdiff_t bstride, vint8mf8_t value, - size_t vl) { +void test_vsse8_v_i8mf8 (int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf4(int8_t *base, ptrdiff_t bstride, vint8mf4_t value, - size_t vl) { +void test_vsse8_v_i8mf4 (int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf2(int8_t *base, ptrdiff_t bstride, vint8mf2_t value, - size_t vl) { +void test_vsse8_v_i8mf2 (int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m1(int8_t *base, ptrdiff_t bstride, vint8m1_t value, - size_t vl) { +void test_vsse8_v_i8m1 (int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m2(int8_t *base, ptrdiff_t bstride, vint8m2_t value, - size_t vl) { +void test_vsse8_v_i8m2 (int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m4(int8_t *base, ptrdiff_t bstride, vint8m4_t value, - size_t vl) { +void test_vsse8_v_i8m4 (int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m8( // CHECK-RV64-NEXT: entry: //
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m8(int8_t *base, ptrdiff_t bstride, vint8m8_t value, - size_t vl) { +void test_vsse8_v_i8m8 (int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf4(int16_t *base, ptrdiff_t bstride, vint16mf4_t value, - size_t vl) { +void test_vsse16_v_i16mf4 (int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf2(int16_t *base, ptrdiff_t bstride, vint16mf2_t value, - size_t vl) { +void test_vsse16_v_i16mf2 (int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m1(int16_t *base, ptrdiff_t bstride, vint16m1_t value, - size_t vl) { +void test_vsse16_v_i16m1 (int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m2(int16_t *base, ptrdiff_t bstride, vint16m2_t value, - size_t vl) { +void test_vsse16_v_i16m2 (int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m4(int16_t *base, ptrdiff_t bstride, vint16m4_t value, - size_t vl) { +void test_vsse16_v_i16m4 (int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m8(int16_t *base, ptrdiff_t bstride, vint16m8_t value, - size_t vl) { +void test_vsse16_v_i16m8 (int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { 
return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32mf2(int32_t *base, ptrdiff_t bstride, vint32mf2_t value, - size_t vl) { +void test_vsse32_v_i32mf2 (int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m1(int32_t *base, ptrdiff_t bstride, vint32m1_t value, - size_t vl) { +void test_vsse32_v_i32m1 (int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m2(int32_t *base, ptrdiff_t bstride, vint32m2_t value, - size_t vl) { +void test_vsse32_v_i32m2 (int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m4(int32_t *base, ptrdiff_t bstride, vint32m4_t value, - size_t vl) { +void test_vsse32_v_i32m4 (int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m8(int32_t *base, ptrdiff_t bstride, vint32m8_t value, - size_t vl) { +void test_vsse32_v_i32m8 (int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m1(int64_t *base, ptrdiff_t bstride, vint64m1_t value, - size_t vl) { +void test_vsse64_v_i64m1 (int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m2(int64_t *base, ptrdiff_t bstride, 
vint64m2_t value, - size_t vl) { +void test_vsse64_v_i64m2 (int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m4(int64_t *base, ptrdiff_t bstride, vint64m4_t value, - size_t vl) { +void test_vsse64_v_i64m4 (int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m8(int64_t *base, ptrdiff_t bstride, vint64m8_t value, - size_t vl) { +void test_vsse64_v_i64m8 (int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf8(uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, - size_t vl) { +void test_vsse8_v_u8mf8 (uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf4(uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, - size_t vl) { +void test_vsse8_v_u8mf4 (uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf2(uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, - size_t vl) { +void test_vsse8_v_u8mf2 (uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m1(uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, - size_t vl) { +void test_vsse8_v_u8m1 (uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m2(uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, - size_t vl) { +void test_vsse8_v_u8m2 (uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m4(uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, - size_t vl) { +void test_vsse8_v_u8m4 (uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m8(uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, - size_t vl) { +void test_vsse8_v_u8m8 (uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { return vsse8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf4(uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, - size_t vl) { +void test_vsse16_v_u16mf4 (uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf2(uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, - size_t vl) { +void test_vsse16_v_u16mf2 (uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m1(uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, - size_t vl) { +void test_vsse16_v_u16m1 (uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m2(uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, - size_t vl) { +void test_vsse16_v_u16m2 (uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m4(uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, - size_t vl) { +void test_vsse16_v_u16m4 (uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m8(uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, - size_t vl) { +void test_vsse16_v_u16m8 (uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { return vsse16(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32mf2(uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, - size_t vl) { +void test_vsse32_v_u32mf2 (uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m1(uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, - size_t vl) { +void test_vsse32_v_u32m1 (uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m2(uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, - size_t vl) { +void test_vsse32_v_u32m2 (uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m4(uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, - size_t vl) { +void test_vsse32_v_u32m4 (uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m8(uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, - size_t vl) { +void test_vsse32_v_u32m8 (uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { return vsse32(base, bstride, value, 
vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m1(uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, - size_t vl) { +void test_vsse64_v_u64m1 (uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m2(uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, - size_t vl) { +void test_vsse64_v_u64m2 (uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m4(uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, - size_t vl) { +void test_vsse64_v_u64m4 (uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, - size_t vl) { +void test_vsse64_v_u64m8 (uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { + return vsse16(base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { + return vsse16(base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { + return vsse16(base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8 (_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+ return vsse16(base, bstride, value, vl);
+}
+
// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse32_v_f32mf2(float *base, ptrdiff_t bstride, vfloat32mf2_t value,
- size_t vl) {
+void test_vsse32_v_f32mf2 (float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
 return vsse32(base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse32_v_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse32_v_f32m1(float *base, ptrdiff_t bstride, vfloat32m1_t value,
- size_t vl) {
+void test_vsse32_v_f32m1 (float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) {
 return vsse32(base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse32_v_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse32_v_f32m2(float *base, ptrdiff_t bstride, vfloat32m2_t value,
- size_t vl) {
+void test_vsse32_v_f32m2 (float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) {
 return vsse32(base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse32_v_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse32_v_f32m4(float *base, ptrdiff_t bstride, vfloat32m4_t value,
- size_t vl) {
+void test_vsse32_v_f32m4 (float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) {
 return vsse32(base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse32_v_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f32.i64(
[[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m8(float *base, ptrdiff_t bstride, vfloat32m8_t value, - size_t vl) { +void test_vsse32_v_f32m8 (float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { return vsse32(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m1(double *base, ptrdiff_t bstride, vfloat64m1_t value, - size_t vl) { +void test_vsse64_v_f64m1 (double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m2(double *base, ptrdiff_t bstride, vfloat64m2_t value, - size_t vl) { +void test_vsse64_v_f64m2 (double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m4(double *base, ptrdiff_t bstride, vfloat64m4_t value, - size_t vl) { +void test_vsse64_v_f64m4 (double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m8(double *base, ptrdiff_t bstride, vfloat64m8_t value, - size_t vl) { +void test_vsse64_v_f64m8 (double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) { return vsse64(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf8_m(vbool64_t mask, int8_t *base, ptrdiff_t bstride, - vint8mf8_t value, size_t vl) { +void test_vsse8_v_i8mf8_m (vbool64_t mask, int8_t *base, ptrdiff_t bstride, vint8mf8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf4_m(vbool32_t mask, int8_t *base, ptrdiff_t bstride, - vint8mf4_t value, size_t vl) { +void test_vsse8_v_i8mf4_m (vbool32_t mask, int8_t *base, ptrdiff_t bstride, vint8mf4_t value, size_t vl) { 
return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8mf2_m(vbool16_t mask, int8_t *base, ptrdiff_t bstride, - vint8mf2_t value, size_t vl) { +void test_vsse8_v_i8mf2_m (vbool16_t mask, int8_t *base, ptrdiff_t bstride, vint8mf2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m1_m(vbool8_t mask, int8_t *base, ptrdiff_t bstride, - vint8m1_t value, size_t vl) { +void test_vsse8_v_i8m1_m (vbool8_t mask, int8_t *base, ptrdiff_t bstride, vint8m1_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m2_m(vbool4_t mask, int8_t *base, ptrdiff_t bstride, - vint8m2_t value, size_t vl) { +void test_vsse8_v_i8m2_m (vbool4_t mask, int8_t *base, ptrdiff_t bstride, vint8m2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m4_m(vbool2_t mask, int8_t *base, ptrdiff_t bstride, - vint8m4_t value, size_t vl) { +void test_vsse8_v_i8m4_m (vbool2_t mask, int8_t *base, ptrdiff_t bstride, vint8m4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_i8m8_m(vbool1_t mask, int8_t *base, ptrdiff_t bstride, - vint8m8_t value, size_t vl) { +void test_vsse8_v_i8m8_m (vbool1_t mask, int8_t *base, ptrdiff_t bstride, vint8m8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf4_m(vbool64_t mask, int16_t *base, ptrdiff_t bstride, - vint16mf4_t value, size_t vl) { +void test_vsse16_v_i16mf4_m (vbool64_t mask, int16_t *base, ptrdiff_t bstride, vint16mf4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16mf2_m(vbool32_t mask, int16_t *base, ptrdiff_t bstride, - vint16mf2_t value, size_t vl) { +void test_vsse16_v_i16mf2_m (vbool32_t mask, int16_t *base, ptrdiff_t bstride, vint16mf2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m1_m(vbool16_t mask, int16_t *base, ptrdiff_t bstride, - vint16m1_t value, size_t vl) { +void test_vsse16_v_i16m1_m (vbool16_t mask, int16_t *base, ptrdiff_t bstride, vint16m1_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m2_m(vbool8_t mask, int16_t *base, ptrdiff_t bstride, - vint16m2_t value, size_t vl) { +void test_vsse16_v_i16m2_m (vbool8_t mask, int16_t *base, ptrdiff_t bstride, vint16m2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m4_m(vbool4_t mask, int16_t *base, ptrdiff_t bstride, - vint16m4_t value, size_t vl) { +void test_vsse16_v_i16m4_m (vbool4_t mask, int16_t *base, ptrdiff_t bstride, vint16m4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_i16m8_m(vbool2_t mask, int16_t *base, ptrdiff_t bstride, - vint16m8_t value, size_t vl) { +void test_vsse16_v_i16m8_m (vbool2_t mask, int16_t *base, ptrdiff_t bstride, vint16m8_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32mf2_m(vbool64_t mask, int32_t *base, ptrdiff_t bstride, - vint32mf2_t value, size_t vl) { +void test_vsse32_v_i32mf2_m (vbool64_t mask, int32_t *base, ptrdiff_t bstride, vint32mf2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m1_m(vbool32_t mask, int32_t *base, ptrdiff_t bstride, - vint32m1_t value, size_t vl) { +void test_vsse32_v_i32m1_m (vbool32_t mask, int32_t *base, ptrdiff_t bstride, vint32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m2_m(vbool16_t mask, int32_t *base, ptrdiff_t bstride, - vint32m2_t value, size_t vl) { +void test_vsse32_v_i32m2_m (vbool16_t mask, int32_t *base, ptrdiff_t bstride, vint32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m4_m(vbool8_t mask, int32_t *base, ptrdiff_t bstride, - vint32m4_t value, size_t vl) { +void test_vsse32_v_i32m4_m (vbool8_t mask, int32_t *base, ptrdiff_t bstride, vint32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_i32m8_m(vbool4_t mask, int32_t *base, ptrdiff_t bstride, - vint32m8_t value, size_t vl) { +void test_vsse32_v_i32m8_m (vbool4_t mask, int32_t *base, ptrdiff_t bstride, vint32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m1_m(vbool64_t mask, int64_t *base, ptrdiff_t bstride, - vint64m1_t value, size_t vl) { +void test_vsse64_v_i64m1_m (vbool64_t mask, int64_t *base, ptrdiff_t bstride, vint64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m2_m(vbool32_t mask, int64_t *base, ptrdiff_t bstride, - vint64m2_t value, size_t vl) { +void test_vsse64_v_i64m2_m (vbool32_t mask, int64_t *base, ptrdiff_t bstride, vint64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m4_m(vbool16_t mask, int64_t *base, ptrdiff_t bstride, - vint64m4_t value, size_t vl) { +void test_vsse64_v_i64m4_m (vbool16_t mask, int64_t *base, ptrdiff_t bstride, vint64m4_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_i64m8_m(vbool8_t mask, int64_t *base, ptrdiff_t bstride, - vint64m8_t value, size_t vl) { +void test_vsse64_v_i64m8_m (vbool8_t mask, int64_t *base, ptrdiff_t bstride, vint64m8_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8mf8_t value, size_t vl) { +void test_vsse8_v_u8mf8_m (vbool64_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8mf4_t value, size_t vl) { +void test_vsse8_v_u8mf4_m (vbool32_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8mf2_t value, size_t vl) { +void test_vsse8_v_u8mf2_m (vbool16_t mask, uint8_t *base, ptrdiff_t bstride, vuint8mf2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m1_m(vbool8_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8m1_t value, size_t vl) { +void test_vsse8_v_u8m1_m (vbool8_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m1_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to 
* // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m2_m(vbool4_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8m2_t value, size_t vl) { +void test_vsse8_v_u8m2_m (vbool4_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m2_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m4_m(vbool2_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8m4_t value, size_t vl) { +void test_vsse8_v_u8m4_m (vbool2_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m4_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse8_v_u8m8_m(vbool1_t mask, uint8_t *base, ptrdiff_t bstride, - vuint8m8_t value, size_t vl) { +void test_vsse8_v_u8m8_m (vbool1_t mask, uint8_t *base, ptrdiff_t bstride, vuint8m8_t value, size_t vl) { return vsse8(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16mf4_t value, size_t vl) { +void test_vsse16_v_u16mf4_m (vbool64_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16mf2_t value, size_t vl) { +void test_vsse16_v_u16mf2_m (vbool32_t mask, uint16_t *base, ptrdiff_t bstride, vuint16mf2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m1_m(vbool16_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16m1_t value, size_t vl) { +void test_vsse16_v_u16m1_m (vbool16_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m1_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsse.mask.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m2_m(vbool8_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16m2_t value, size_t vl) { +void test_vsse16_v_u16m2_m (vbool8_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m2_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m4_m(vbool4_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16m4_t value, size_t vl) { +void test_vsse16_v_u16m4_m (vbool4_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m4_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse16_v_u16m8_m(vbool2_t mask, uint16_t *base, ptrdiff_t bstride, - vuint16m8_t value, size_t vl) { +void test_vsse16_v_u16m8_m (vbool2_t mask, uint16_t *base, ptrdiff_t bstride, vuint16m8_t value, size_t vl) { return vsse16(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, ptrdiff_t bstride, - vuint32mf2_t value, size_t vl) { +void test_vsse32_v_u32mf2_m (vbool64_t mask, uint32_t *base, ptrdiff_t bstride, vuint32mf2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m1_m(vbool32_t mask, uint32_t *base, ptrdiff_t bstride, - vuint32m1_t value, size_t vl) { +void test_vsse32_v_u32m1_m (vbool32_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m2_m(vbool16_t mask, uint32_t *base, ptrdiff_t bstride, - vuint32m2_t value, size_t vl) { +void test_vsse32_v_u32m2_m (vbool16_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsse.mask.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m4_m(vbool8_t mask, uint32_t *base, ptrdiff_t bstride, - vuint32m4_t value, size_t vl) { +void test_vsse32_v_u32m4_m (vbool8_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_u32m8_m(vbool4_t mask, uint32_t *base, ptrdiff_t bstride, - vuint32m8_t value, size_t vl) { +void test_vsse32_v_u32m8_m (vbool4_t mask, uint32_t *base, ptrdiff_t bstride, vuint32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m1_m(vbool64_t mask, uint64_t *base, ptrdiff_t bstride, - vuint64m1_t value, size_t vl) { +void test_vsse64_v_u64m1_m (vbool64_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m2_m(vbool32_t mask, uint64_t *base, ptrdiff_t bstride, - vuint64m2_t value, size_t vl) { +void test_vsse64_v_u64m2_m (vbool32_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m4_m(vbool16_t mask, uint64_t *base, ptrdiff_t bstride, - vuint64m4_t value, size_t vl) { +void test_vsse64_v_u64m4_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m4_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride, - vuint64m8_t value, size_t vl) { +void test_vsse64_v_u64m8_m (vbool8_t mask, uint64_t *base, ptrdiff_t bstride, vuint64m8_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8_m (vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+ return vsse16(mask, base, bstride, value, vl);
+}
+
// CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse32_v_f32mf2_m(vbool64_t mask, float *base, ptrdiff_t bstride,
- vfloat32mf2_t value, size_t vl) {
+void test_vsse32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t value, size_t vl) {
 return vsse32(mask, base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse32_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f32.i64(
[[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m1_m(vbool32_t mask, float *base, ptrdiff_t bstride, - vfloat32m1_t value, size_t vl) { +void test_vsse32_v_f32m1_m (vbool32_t mask, float *base, ptrdiff_t bstride, vfloat32m1_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m2_m(vbool16_t mask, float *base, ptrdiff_t bstride, - vfloat32m2_t value, size_t vl) { +void test_vsse32_v_f32m2_m (vbool16_t mask, float *base, ptrdiff_t bstride, vfloat32m2_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m4_m(vbool8_t mask, float *base, ptrdiff_t bstride, - vfloat32m4_t value, size_t vl) { +void test_vsse32_v_f32m4_m (vbool8_t mask, float *base, ptrdiff_t bstride, vfloat32m4_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse32_v_f32m8_m(vbool4_t mask, float *base, ptrdiff_t bstride, - vfloat32m8_t value, size_t vl) { +void test_vsse32_v_f32m8_m (vbool4_t mask, float *base, ptrdiff_t bstride, vfloat32m8_t value, size_t vl) { return vsse32(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m1_m(vbool64_t mask, double *base, ptrdiff_t bstride, - vfloat64m1_t value, size_t vl) { +void test_vsse64_v_f64m1_m (vbool64_t mask, double *base, ptrdiff_t bstride, vfloat64m1_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsse64_v_f64m2_m(vbool32_t mask, double *base, ptrdiff_t bstride, - vfloat64m2_t value, size_t vl) { +void test_vsse64_v_f64m2_m (vbool32_t mask, double *base, ptrdiff_t bstride, vfloat64m2_t value, size_t vl) { return vsse64(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f64.i64( [[VALUE:%.*]], * 
[[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
- vfloat64m4_t value, size_t vl) {
+void test_vsse64_v_f64m4_m (vbool16_t mask, double *base, ptrdiff_t bstride, vfloat64m4_t value, size_t vl) {
 return vsse64(mask, base, bstride, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
- vfloat64m8_t value, size_t vl) {
+void test_vsse64_v_f64m8_m (vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
 return vsse64(mask, base, bstride, value, vl);
}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
index 7b3599b..3970f86 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssra.c
@@ -447,489 +447,445 @@ vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
 return vssra(op1, shift, vl);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t 
test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, 
vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssra(mask, maskedoff, op1, shift, vl); + return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t 
vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
- return vssra(mask, maskedoff, op1, shift, vl);
+ return vssra(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
index 6c6b9d3..e947b25 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssrl.c
@@ -447,504 +447,460 @@ vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
return vssrl(op1, shift, vl);
}
-//
// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, 
maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t 
op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); 
} -// // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { - return vssrl(mask, maskedoff, op1, shift, vl); + return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
- return vssrl(mask, maskedoff, op1, shift, vl);
+ return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
- return vssrl(mask, maskedoff, op1, shift, vl);
+ return vssrl(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
index 02ec458..1bf7a6f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vssub.c
@@ -887,994 +887,906 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
return vssubu(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vssub(mask, maskedoff, op1, op2, vl);
+ return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vssub(mask, maskedoff, op1, op2, vl);
+ return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return
vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vssub(mask, 
maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vssub(mask, maskedoff, op1, op2, vl); + return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vssub(mask, maskedoff, op1, op2, vl);
+  return vssub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vssubu(mask, maskedoff, op1, op2, vl);
+  return vssubu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
index 51d9848..3df67c8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsub.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsub(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask,
vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vsub(mask, maskedoff, op1, op2, vl); + return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vsub(mask, maskedoff, op1, op2, vl);
+  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
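The vsub tests above illustrate the new calling convention for the masked builtins: the policy argument comes last, after vl, and lowers to a trailing policy operand on the IR intrinsic (i64 1 for tail agnostic here). For completeness, a minimal sketch of selecting the other policy from user code; this assumes the companion VE_TAIL_UNDISTURBED macro is the 0-valued counterpart of VE_TAIL_AGNOSTIC, which these tests do not exercise:

#include <riscv_vector.h>

/* Masked subtract that preserves the tail elements of maskedoff.
   VE_TAIL_UNDISTURBED is assumed to expand to 0; the tests in this
   patch only pass VE_TAIL_AGNOSTIC (1). */
vint32m1_t sub_keep_tail(vbool32_t mask, vint32m1_t maskedoff,
                         vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vsub(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}

With VE_TAIL_UNDISTURBED the generated call would carry i64 0 instead of i64 1, so elements past vl keep the values from maskedoff rather than becoming undefined.
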
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
index 6c829d7..426844c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsuxei.c
@@ -1,4608 +1,4249 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8mf8(int8_t *base, vuint8mf8_t bindex, vint8mf8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i8mf8 (int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8mf4(int8_t *base, vuint8mf4_t bindex, vint8mf4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i8mf4 (int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8mf2(int8_t *base, vuint8mf2_t bindex, vint8mf2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i8mf2 (int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8m1(int8_t *base, vuint8m1_t bindex, vint8m1_t value,
-                         size_t vl) {
+void test_vsuxei8_v_i8m1 (int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8m2(int8_t *base, vuint8m2_t bindex, vint8m2_t value,
-                         size_t vl) {
+void test_vsuxei8_v_i8m2 (int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8m4(int8_t *base, vuint8m4_t bindex, vint8m4_t value,
-                         size_t vl) {
+void test_vsuxei8_v_i8m4 (int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i8m8(int8_t *base, vuint8m8_t bindex, vint8m8_t value,
-                         size_t vl) {
+void test_vsuxei8_v_i8m8 (int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8mf8(int8_t *base, vuint16mf4_t bindex, vint8mf8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i8mf8 (int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8mf4(int8_t *base, vuint16mf2_t bindex, vint8mf4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i8mf4 (int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8mf2(int8_t *base, vuint16m1_t bindex, vint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i8mf2 (int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8m1(int8_t *base, vuint16m2_t bindex, vint8m1_t value,
-                          size_t vl) {
+void test_vsuxei16_v_i8m1 (int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8m2(int8_t *base, vuint16m4_t bindex, vint8m2_t value,
-                          size_t vl) {
+void test_vsuxei16_v_i8m2 (int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i8m4(int8_t *base, vuint16m8_t bindex, vint8m4_t value,
-                          size_t vl) {
+void test_vsuxei16_v_i8m4 (int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i8mf8(int8_t *base, vuint32mf2_t bindex, vint8mf8_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i8mf8 (int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i8mf4(int8_t *base, vuint32m1_t bindex, vint8mf4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i8mf4 (int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i8mf2(int8_t *base, vuint32m2_t bindex, vint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i8mf2 (int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i8m1(int8_t *base, vuint32m4_t bindex, vint8m1_t value,
-                          size_t vl) {
+void test_vsuxei32_v_i8m1 (int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i8m2(int8_t *base, vuint32m8_t bindex, vint8m2_t value,
-                          size_t vl) {
+void test_vsuxei32_v_i8m2 (int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i8mf8(int8_t *base, vuint64m1_t bindex, vint8mf8_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i8mf8 (int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i8mf4(int8_t *base, vuint64m2_t bindex, vint8mf4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i8mf4 (int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i8mf2(int8_t *base, vuint64m4_t bindex, vint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i8mf2 (int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i8m1(int8_t *base, vuint64m8_t bindex, vint8m1_t value,
-                          size_t vl) {
+void test_vsuxei64_v_i8m1 (int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16mf4(int16_t *base, vuint8mf8_t bindex, vint16mf4_t value,
-                           size_t vl) {
+void test_vsuxei8_v_i16mf4 (int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16mf2(int16_t *base, vuint8mf4_t bindex, vint16mf2_t value,
-                           size_t vl) {
+void test_vsuxei8_v_i16mf2 (int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16m1(int16_t *base, vuint8mf2_t bindex, vint16m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i16m1 (int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16m2(int16_t *base, vuint8m1_t bindex, vint16m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i16m2 (int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16m4(int16_t *base, vuint8m2_t bindex, vint16m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i16m4 (int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i16m8(int16_t *base, vuint8m4_t bindex, vint16m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i16m8 (int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16mf4(int16_t *base, vuint16mf4_t bindex,
-                            vint16mf4_t value, size_t vl) {
+void test_vsuxei16_v_i16mf4 (int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16mf2(int16_t *base, vuint16mf2_t bindex,
-                            vint16mf2_t value, size_t vl) {
+void test_vsuxei16_v_i16mf2 (int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16m1(int16_t *base, vuint16m1_t bindex, vint16m1_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i16m1 (int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16m2(int16_t *base, vuint16m2_t bindex, vint16m2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i16m2 (int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16m4(int16_t *base, vuint16m4_t bindex, vint16m4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i16m4 (int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i16m8(int16_t *base, vuint16m8_t bindex, vint16m8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i16m8 (int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i16mf4(int16_t *base, vuint32mf2_t bindex,
-                            vint16mf4_t value, size_t vl) {
+void test_vsuxei32_v_i16mf4 (int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i16mf2(int16_t *base, vuint32m1_t bindex,
-                            vint16mf2_t value, size_t vl) {
+void test_vsuxei32_v_i16mf2 (int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i16m1(int16_t *base, vuint32m2_t bindex, vint16m1_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i16m1 (int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i16m2(int16_t *base, vuint32m4_t bindex, vint16m2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i16m2 (int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i16m4(int16_t *base, vuint32m8_t bindex, vint16m4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i16m4 (int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
   return vsuxei32(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i16mf4(int16_t *base, vuint64m1_t bindex,
-                            vint16mf4_t value, size_t vl) {
+void test_vsuxei64_v_i16mf4 (int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i16mf2(int16_t *base, vuint64m2_t bindex,
-                            vint16mf2_t value, size_t vl) {
+void test_vsuxei64_v_i16mf2 (int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i16m1(int16_t *base, vuint64m4_t bindex, vint16m1_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i16m1 (int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei64_v_i16m2(int16_t *base, vuint64m8_t bindex, vint16m2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i16m2 (int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) {
   return vsuxei64(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i32mf2(int32_t *base, vuint8mf8_t bindex, vint32mf2_t value,
-                           size_t vl) {
+void test_vsuxei8_v_i32mf2 (int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i32m1(int32_t *base, vuint8mf4_t bindex, vint32m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i32m1 (int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i32m2(int32_t *base, vuint8mf2_t bindex, vint32m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i32m2 (int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i32m4(int32_t *base, vuint8m1_t bindex, vint32m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i32m4 (int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei8_v_i32m8(int32_t *base, vuint8m2_t bindex, vint32m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i32m8 (int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) {
   return vsuxei8(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i32mf2(int32_t *base, vuint16mf4_t bindex,
-                            vint32mf2_t value, size_t vl) {
+void test_vsuxei16_v_i32mf2 (int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i32m1(int32_t *base, vuint16mf2_t bindex, vint32m1_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i32m1 (int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i32m2(int32_t *base, vuint16m1_t bindex, vint32m2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i32m2 (int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i32m4(int32_t *base, vuint16m2_t bindex, vint32m4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i32m4 (int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei16_v_i32m8(int32_t *base, vuint16m4_t bindex, vint32m8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i32m8 (int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) {
   return vsuxei16(base, bindex, value, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxei32_v_i32mf2(int32_t
*base, vuint32mf2_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsuxei32_v_i32mf2 (int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m1(int32_t *base, vuint32m1_t bindex, vint32m1_t value, - size_t vl) { +void test_vsuxei32_v_i32m1 (int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m2(int32_t *base, vuint32m2_t bindex, vint32m2_t value, - size_t vl) { +void test_vsuxei32_v_i32m2 (int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m4(int32_t *base, vuint32m4_t bindex, vint32m4_t value, - size_t vl) { +void test_vsuxei32_v_i32m4 (int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m8(int32_t *base, vuint32m8_t bindex, vint32m8_t value, - size_t vl) { +void test_vsuxei32_v_i32m8 (int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32mf2(int32_t *base, vuint64m1_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsuxei64_v_i32mf2 (int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32m1(int32_t *base, vuint64m2_t bindex, vint32m1_t value, - size_t vl) { +void test_vsuxei64_v_i32m1 (int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
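These tests all exercise the type-overloaded form of the indexed-store builtins: the data element type is inferred from the `value` operand, and the `vsuxei8`/`vsuxei16`/`vsuxei32`/`vsuxei64` suffix only encodes the element width of the byte-offset index vector. A minimal usage sketch follows; it is illustrative only, and the load/vsetvl helper names (vsetvl_e32m2, vle32_v_i32m2, vle8_v_u8mf2) are assumed from the pre-1.0 intrinsic naming used elsewhere in this patch, not taken from this diff:

#include <riscv_vector.h>

/* Hypothetical example: scatter up to n 32-bit elements of src into dst
   at the byte offsets given by idx_bytes, using the overloaded API. */
void scatter_i32(int32_t *dst, const uint8_t *idx_bytes,
                 const int32_t *src, size_t n) {
  size_t vl = vsetvl_e32m2(n);                      /* elements per strip */
  vint32m2_t v = vle32_v_i32m2(src, vl);            /* data to store */
  vuint8mf2_t bindex = vle8_v_u8mf2(idx_bytes, vl); /* 8-bit byte offsets */
  /* Element type comes from v; "ei8" comes from the index type. */
  vsuxei8(dst, bindex, v, vl);
}

Note that the index type mirrors the tests above: for 32-bit data at LMUL=2, an 8-bit index vector holding the same vl elements is vuint8mf2_t.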
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i32m4(int32_t *base, vuint64m8_t bindex, vint32m4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i32m4 (int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i64m1(int64_t *base, vuint8mf8_t bindex, vint64m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i64m1 (int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i64m2(int64_t *base, vuint8mf4_t bindex, vint64m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i64m2 (int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i64m4(int64_t *base, vuint8mf2_t bindex, vint64m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i64m4 (int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i64m8(int64_t *base, vuint8m1_t bindex, vint64m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_i64m8 (int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i64m1(int64_t *base, vuint16mf4_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i64m1 (int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i64m2(int64_t *base, vuint16mf2_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i64m2 (int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i64m4(int64_t *base, vuint16m1_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i64m4 (int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i64m8(int64_t *base, vuint16m2_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_i64m8 (int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i64m1(int64_t *base, vuint32mf2_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i64m1 (int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i64m2(int64_t *base, vuint32m1_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i64m2 (int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i64m4(int64_t *base, vuint32m2_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i64m4 (int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i64m8(int64_t *base, vuint32m4_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsuxei32_v_i64m8 (int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i64m1(int64_t *base, vuint64m1_t bindex, vint64m1_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i64m1 (int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i64m2(int64_t *base, vuint64m2_t bindex, vint64m2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i64m2 (int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i64m4(int64_t *base, vuint64m4_t bindex, vint64m4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i64m4 (int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i64m8(int64_t *base, vuint64m8_t bindex, vint64m8_t value,
-                           size_t vl) {
+void test_vsuxei64_v_i64m8 (int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8mf8(uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u8mf8 (uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8mf4(uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u8mf4 (uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8mf2(uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u8mf2 (uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8m1(uint8_t *base, vuint8m1_t bindex, vuint8m1_t value,
-                         size_t vl) {
+void test_vsuxei8_v_u8m1 (uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8m2(uint8_t *base, vuint8m2_t bindex, vuint8m2_t value,
-                         size_t vl) {
+void test_vsuxei8_v_u8m2 (uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8m4(uint8_t *base, vuint8m4_t bindex, vuint8m4_t value,
-                         size_t vl) {
+void test_vsuxei8_v_u8m4 (uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u8m8(uint8_t *base, vuint8m8_t bindex, vuint8m8_t value,
-                         size_t vl) {
+void test_vsuxei8_v_u8m8 (uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8mf8(uint8_t *base, vuint16mf4_t bindex,
-                           vuint8mf8_t value, size_t vl) {
+void test_vsuxei16_v_u8mf8 (uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8mf4(uint8_t *base, vuint16mf2_t bindex,
-                           vuint8mf4_t value, size_t vl) {
+void test_vsuxei16_v_u8mf4 (uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8mf2(uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_u8mf2 (uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8m1(uint8_t *base, vuint16m2_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsuxei16_v_u8m1 (uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8m2(uint8_t *base, vuint16m4_t bindex, vuint8m2_t value,
-                          size_t vl) {
+void test_vsuxei16_v_u8m2 (uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u8m4(uint8_t *base, vuint16m8_t bindex, vuint8m4_t value,
-                          size_t vl) {
+void test_vsuxei16_v_u8m4 (uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u8mf8(uint8_t *base, vuint32mf2_t bindex,
-                           vuint8mf8_t value, size_t vl) {
+void test_vsuxei32_v_u8mf8 (uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u8mf4(uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_u8mf4 (uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u8mf2(uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_u8mf2 (uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u8m1(uint8_t *base, vuint32m4_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsuxei32_v_u8m1 (uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u8m2(uint8_t *base, vuint32m8_t bindex, vuint8m2_t value,
-                          size_t vl) {
+void test_vsuxei32_v_u8m2 (uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u8mf8(uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value,
-                           size_t vl) {
+void test_vsuxei64_v_u8mf8 (uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u8mf4(uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_u8mf4 (uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u8mf2(uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_u8mf2 (uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u8m1(uint8_t *base, vuint64m8_t bindex, vuint8m1_t value,
-                          size_t vl) {
+void test_vsuxei64_v_u8m1 (uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16mf4(uint16_t *base, vuint8mf8_t bindex,
-                           vuint16mf4_t value, size_t vl) {
+void test_vsuxei8_v_u16mf4 (uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16mf2(uint16_t *base, vuint8mf4_t bindex,
-                           vuint16mf2_t value, size_t vl) {
+void test_vsuxei8_v_u16mf2 (uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16m1(uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u16m1 (uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16m2(uint16_t *base, vuint8m1_t bindex, vuint16m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u16m2 (uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16m4(uint16_t *base, vuint8m2_t bindex, vuint16m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u16m4 (uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_u16m8(uint16_t *base, vuint8m4_t bindex, vuint16m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_u16m8 (uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16mf4(uint16_t *base, vuint16mf4_t bindex,
-                            vuint16mf4_t value, size_t vl) {
+void test_vsuxei16_v_u16mf4 (uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16mf2(uint16_t *base, vuint16mf2_t bindex,
-                            vuint16mf2_t value, size_t vl) {
+void test_vsuxei16_v_u16mf2 (uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16m1(uint16_t *base, vuint16m1_t bindex,
-                           vuint16m1_t value, size_t vl) {
+void test_vsuxei16_v_u16m1 (uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16m2(uint16_t *base, vuint16m2_t bindex,
-                           vuint16m2_t value, size_t vl) {
+void test_vsuxei16_v_u16m2 (uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16m4(uint16_t *base, vuint16m4_t bindex,
-                           vuint16m4_t value, size_t vl) {
+void test_vsuxei16_v_u16m4 (uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_u16m8(uint16_t *base, vuint16m8_t bindex,
-                           vuint16m8_t value, size_t vl) {
+void test_vsuxei16_v_u16m8 (uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_u16mf4(uint16_t *base, vuint32mf2_t bindex,
-                            vuint16mf4_t value, size_t vl) {
+void test_vsuxei32_v_u16mf4 (uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
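The bindex parameter types in these tests also illustrate how index EEW and data EEW interact: the index vector must hold the same vl elements as the data vector, so widening the index elements widens the register group. For 16-bit data at LMUL=1, for example, 64-bit byte offsets occupy an LMUL=4 group, which is why vuint16m1_t values pair with vuint64m4_t indices above. A small hedged sketch (the vsetvl_e16m1, vle16_v_u16m1, and vle64_v_u64m4 helpers are assumed pre-1.0 intrinsic names, not part of this diff):

#include <riscv_vector.h>

/* Hypothetical example: scatter 16-bit elements using 64-bit byte offsets. */
void scatter_u16(uint16_t *dst, const uint64_t *offsets,
                 const uint16_t *src, size_t n) {
  size_t vl = vsetvl_e16m1(n);
  vuint16m1_t v = vle16_v_u16m1(src, vl);          /* data at LMUL=1 */
  vuint64m4_t bindex = vle64_v_u64m4(offsets, vl); /* offsets at LMUL=4 */
  vsuxei64(dst, bindex, v, vl);                    /* ei64: 64-bit offsets */
}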
CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16mf2(uint16_t *base, vuint32m1_t bindex, - vuint16mf2_t value, size_t vl) { +void test_vsuxei32_v_u16mf2 (uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m1(uint16_t *base, vuint32m2_t bindex, - vuint16m1_t value, size_t vl) { +void test_vsuxei32_v_u16m1 (uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m2(uint16_t *base, vuint32m4_t bindex, - vuint16m2_t value, size_t vl) { +void test_vsuxei32_v_u16m2 (uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u16m4(uint16_t *base, vuint32m8_t bindex, - vuint16m4_t value, size_t vl) { +void test_vsuxei32_v_u16m4 (uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf4(uint16_t *base, vuint64m1_t bindex, - vuint16mf4_t value, size_t vl) { +void test_vsuxei64_v_u16mf4 (uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16mf2(uint16_t *base, vuint64m2_t bindex, - vuint16mf2_t value, size_t vl) { +void test_vsuxei64_v_u16mf2 (uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsuxei64_v_u16m1(uint16_t *base, vuint64m4_t bindex, - vuint16m1_t value, size_t vl) { +void test_vsuxei64_v_u16m1 (uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u16m2(uint16_t *base, vuint64m8_t bindex, - vuint16m2_t value, size_t vl) { +void test_vsuxei64_v_u16m2 (uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32mf2(uint32_t *base, vuint8mf8_t bindex, - vuint32mf2_t value, size_t vl) { +void test_vsuxei8_v_u32mf2 (uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m1(uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, - size_t vl) { +void test_vsuxei8_v_u32m1 (uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m2(uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, - size_t vl) { +void test_vsuxei8_v_u32m2 (uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m4(uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, - size_t vl) { +void test_vsuxei8_v_u32m4 (uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u32m8(uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, - size_t vl) { +void test_vsuxei8_v_u32m8 (uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32mf2(uint32_t *base, vuint16mf4_t bindex, - vuint32mf2_t value, size_t vl) { +void test_vsuxei16_v_u32mf2 (uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m1(uint32_t *base, vuint16mf2_t bindex, - vuint32m1_t value, size_t vl) { +void test_vsuxei16_v_u32m1 (uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m2(uint32_t *base, vuint16m1_t bindex, - vuint32m2_t value, size_t vl) { +void test_vsuxei16_v_u32m2 (uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m4(uint32_t *base, vuint16m2_t bindex, - vuint32m4_t value, size_t vl) { +void test_vsuxei16_v_u32m4 (uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u32m8(uint32_t *base, vuint16m4_t bindex, - vuint32m8_t value, size_t vl) { +void test_vsuxei16_v_u32m8 (uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32mf2(uint32_t *base, vuint32mf2_t bindex, - vuint32mf2_t value, size_t vl) { +void test_vsuxei32_v_u32mf2 (uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m1(uint32_t *base, 
vuint32m1_t bindex, - vuint32m1_t value, size_t vl) { +void test_vsuxei32_v_u32m1 (uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m2(uint32_t *base, vuint32m2_t bindex, - vuint32m2_t value, size_t vl) { +void test_vsuxei32_v_u32m2 (uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m4(uint32_t *base, vuint32m4_t bindex, - vuint32m4_t value, size_t vl) { +void test_vsuxei32_v_u32m4 (uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u32m8(uint32_t *base, vuint32m8_t bindex, - vuint32m8_t value, size_t vl) { +void test_vsuxei32_v_u32m8 (uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32mf2(uint32_t *base, vuint64m1_t bindex, - vuint32mf2_t value, size_t vl) { +void test_vsuxei64_v_u32mf2 (uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m1(uint32_t *base, vuint64m2_t bindex, - vuint32m1_t value, size_t vl) { +void test_vsuxei64_v_u32m1 (uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m2(uint32_t *base, vuint64m4_t bindex, - vuint32m2_t value, size_t vl) { +void test_vsuxei64_v_u32m2 (uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u32m4(uint32_t *base, vuint64m8_t bindex, - vuint32m4_t value, size_t vl) { +void test_vsuxei64_v_u32m4 (uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) { return vsuxei64(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m1(uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, - size_t vl) { +void test_vsuxei8_v_u64m1 (uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m2(uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, - size_t vl) { +void test_vsuxei8_v_u64m2 (uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m4(uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, - size_t vl) { +void test_vsuxei8_v_u64m4 (uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_u64m8(uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, - size_t vl) { +void test_vsuxei8_v_u64m8 (uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) { return vsuxei8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m1(uint64_t *base, vuint16mf4_t bindex, - vuint64m1_t value, size_t vl) { +void test_vsuxei16_v_u64m1 (uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m2(uint64_t *base, vuint16mf2_t bindex, - vuint64m2_t value, size_t vl) { +void 
test_vsuxei16_v_u64m2 (uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m4(uint64_t *base, vuint16m1_t bindex, - vuint64m4_t value, size_t vl) { +void test_vsuxei16_v_u64m4 (uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_u64m8(uint64_t *base, vuint16m2_t bindex, - vuint64m8_t value, size_t vl) { +void test_vsuxei16_v_u64m8 (uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) { return vsuxei16(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m1(uint64_t *base, vuint32mf2_t bindex, - vuint64m1_t value, size_t vl) { +void test_vsuxei32_v_u64m1 (uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m2(uint64_t *base, vuint32m1_t bindex, - vuint64m2_t value, size_t vl) { +void test_vsuxei32_v_u64m2 (uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m4(uint64_t *base, vuint32m2_t bindex, - vuint64m4_t value, size_t vl) { +void test_vsuxei32_v_u64m4 (uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_u64m8(uint64_t *base, vuint32m4_t bindex, - vuint64m8_t value, size_t vl) { +void test_vsuxei32_v_u64m8 (uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) { return vsuxei32(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u64m1(uint64_t *base, vuint64m1_t bindex,
-                           vuint64m1_t value, size_t vl) {
+void test_vsuxei64_v_u64m1 (uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u64m2(uint64_t *base, vuint64m2_t bindex,
-                           vuint64m2_t value, size_t vl) {
+void test_vsuxei64_v_u64m2 (uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u64m4(uint64_t *base, vuint64m4_t bindex,
-                           vuint64m4_t value, size_t vl) {
+void test_vsuxei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
-                           vuint64m8_t value, size_t vl) {
+void test_vsuxei64_v_u64m8 (uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f32mf2(float *base, vuint8mf8_t bindex, vfloat32mf2_t value,
-                           size_t vl) {
+void test_vsuxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f32m1(float *base, vuint8mf4_t bindex, vfloat32m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f32m2(float *base, vuint8mf2_t bindex, vfloat32m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f32m4(float *base, vuint8m1_t bindex, vfloat32m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f32m8(float *base, vuint8m2_t bindex, vfloat32m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
+void test_vsuxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f32mf2(float *base, vuint16mf4_t bindex,
-                            vfloat32mf2_t value, size_t vl) {
+void test_vsuxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f32m1(float *base, vuint16mf2_t bindex, vfloat32m1_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f32m2(float *base, vuint16m1_t bindex, vfloat32m2_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f32m4(float *base, vuint16m2_t bindex, vfloat32m4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f32m8(float *base, vuint16m4_t bindex, vfloat32m8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
+void test_vsuxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f32mf2(float *base, vuint32mf2_t bindex,
-                            vfloat32mf2_t value, size_t vl) {
+void test_vsuxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f32m1(float *base, vuint32m1_t bindex, vfloat32m1_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f32m2(float *base, vuint32m2_t bindex, vfloat32m2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f32m4(float *base, vuint32m4_t bindex, vfloat32m4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f32m8(float *base, vuint32m8_t bindex, vfloat32m8_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f32mf2(float *base, vuint64m1_t bindex,
-                            vfloat32mf2_t value, size_t vl) {
+void test_vsuxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f32m1(float *base, vuint64m2_t bindex, vfloat32m1_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f32m2(float *base, vuint64m4_t bindex, vfloat32m2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f32m4(float *base, vuint64m8_t bindex, vfloat32m4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f64m1(double *base, vuint8mf8_t bindex, vfloat64m1_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f64m2(double *base, vuint8mf4_t bindex, vfloat64m2_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f32m8 (float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei16_v_f32m8 (float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) {
+  return vsuxei16(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) {
+  return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) {
+  return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) {
+  return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) {
+  return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_f32m8 (float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) {
+  return vsuxei32(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) {
+  return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) {
+  return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) {
+  return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) {
+  return vsuxei64(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) {
+  return vsuxei8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f64m4(double *base, vuint8mf2_t bindex, vfloat64m4_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_f64m8(double *base, vuint8m1_t bindex, vfloat64m8_t value,
-                          size_t vl) {
+void test_vsuxei8_v_f64m8 (double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) {
  return vsuxei8(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f64m1(double *base, vuint16mf4_t bindex,
-                           vfloat64m1_t value, size_t vl) {
+void test_vsuxei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f64m2(double *base, vuint16mf2_t bindex,
-                           vfloat64m2_t value, size_t vl) {
+void test_vsuxei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f64m4(double *base, vuint16m1_t bindex, vfloat64m4_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_f64m8(double *base, vuint16m2_t bindex, vfloat64m8_t value,
-                           size_t vl) {
+void test_vsuxei16_v_f64m8 (double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) {
  return vsuxei16(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f64m1(double *base, vuint32mf2_t bindex,
-                           vfloat64m1_t value, size_t vl) {
+void test_vsuxei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f64m2(double *base, vuint32m1_t bindex, vfloat64m2_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f64m4(double *base, vuint32m2_t bindex, vfloat64m4_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_f64m8(double *base, vuint32m4_t bindex, vfloat64m8_t value,
-                           size_t vl) {
+void test_vsuxei32_v_f64m8 (double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) {
  return vsuxei32(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f64m1(double *base, vuint64m1_t bindex, vfloat64m1_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f64m2(double *base, vuint64m2_t bindex, vfloat64m2_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f64m4(double *base, vuint64m4_t bindex, vfloat64m4_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_f64m8(double *base, vuint64m8_t bindex, vfloat64m8_t value,
-                           size_t vl) {
+void test_vsuxei64_v_f64m8 (double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) {
  return vsuxei64(base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint8mf8_t bindex,
-                            vint8mf8_t value, size_t vl) {
+void test_vsuxei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint8mf4_t bindex,
-                            vint8mf4_t value, size_t vl) {
+void test_vsuxei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint8mf2_t bindex,
-                            vint8mf2_t value, size_t vl) {
+void test_vsuxei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8m1_m(vbool8_t mask, int8_t *base, vuint8m1_t bindex,
-                           vint8m1_t value, size_t vl) {
+void test_vsuxei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8m2_m(vbool4_t mask, int8_t *base, vuint8m2_t bindex,
-                           vint8m2_t value, size_t vl) {
+void test_vsuxei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8m4_m(vbool2_t mask, int8_t *base, vuint8m4_t bindex,
-                           vint8m4_t value, size_t vl) {
+void test_vsuxei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i8m8_m(vbool1_t mask, int8_t *base, vuint8m8_t bindex,
-                           vint8m8_t value, size_t vl) {
+void test_vsuxei8_v_i8m8_m (vbool1_t mask, int8_t *base, vuint8m8_t bindex, vint8m8_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint16mf4_t bindex,
-                             vint8mf8_t value, size_t vl) {
+void test_vsuxei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint16mf2_t bindex,
-                             vint8mf4_t value, size_t vl) {
+void test_vsuxei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint16m1_t bindex,
-                             vint8mf2_t value, size_t vl) {
+void test_vsuxei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8m1_m(vbool8_t mask, int8_t *base, vuint16m2_t bindex,
-                            vint8m1_t value, size_t vl) {
+void test_vsuxei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8m2_m(vbool4_t mask, int8_t *base, vuint16m4_t bindex,
-                            vint8m2_t value, size_t vl) {
+void test_vsuxei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i8m4_m(vbool2_t mask, int8_t *base, vuint16m8_t bindex,
-                            vint8m4_t value, size_t vl) {
+void test_vsuxei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint32mf2_t bindex,
-                             vint8mf8_t value, size_t vl) {
+void test_vsuxei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint32m1_t bindex,
-                             vint8mf4_t value, size_t vl) {
+void test_vsuxei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint32m2_t bindex,
-                             vint8mf2_t value, size_t vl) {
+void test_vsuxei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i8m1_m(vbool8_t mask, int8_t *base, vuint32m4_t bindex,
-                            vint8m1_t value, size_t vl) {
+void test_vsuxei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i8m2_m(vbool4_t mask, int8_t *base, vuint32m8_t bindex,
-                            vint8m2_t value, size_t vl) {
+void test_vsuxei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i8mf8_m(vbool64_t mask, int8_t *base, vuint64m1_t bindex,
-                             vint8mf8_t value, size_t vl) {
+void test_vsuxei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i8mf4_m(vbool32_t mask, int8_t *base, vuint64m2_t bindex,
-                             vint8mf4_t value, size_t vl) {
+void test_vsuxei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i8mf2_m(vbool16_t mask, int8_t *base, vuint64m4_t bindex,
-                             vint8mf2_t value, size_t vl) {
+void test_vsuxei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i8m1_m(vbool8_t mask, int8_t *base, vuint64m8_t bindex,
-                            vint8m1_t value, size_t vl) {
+void test_vsuxei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint8mf8_t bindex,
-                             vint16mf4_t value, size_t vl) {
+void test_vsuxei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint8mf4_t bindex,
-                             vint16mf2_t value, size_t vl) {
+void test_vsuxei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16m1_m(vbool16_t mask, int16_t *base, vuint8mf2_t bindex,
-                            vint16m1_t value, size_t vl) {
+void test_vsuxei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16m2_m(vbool8_t mask, int16_t *base, vuint8m1_t bindex,
-                            vint16m2_t value, size_t vl) {
+void test_vsuxei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16m4_m(vbool4_t mask, int16_t *base, vuint8m2_t bindex,
-                            vint16m4_t value, size_t vl) {
+void test_vsuxei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei8_v_i16m8_m(vbool2_t mask, int16_t *base, vuint8m4_t bindex,
-                            vint16m8_t value, size_t vl) {
+void test_vsuxei8_v_i16m8_m (vbool2_t mask, int16_t *base, vuint8m4_t bindex, vint16m8_t value, size_t vl) {
  return vsuxei8(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16mf4_m(vbool64_t mask, int16_t *base,
-                              vuint16mf4_t bindex, vint16mf4_t value,
-                              size_t vl) {
+void test_vsuxei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16mf2_m(vbool32_t mask, int16_t *base,
-                              vuint16mf2_t bindex, vint16mf2_t value,
-                              size_t vl) {
+void test_vsuxei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16m1_m(vbool16_t mask, int16_t *base, vuint16m1_t bindex,
-                             vint16m1_t value, size_t vl) {
+void test_vsuxei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16m2_m(vbool8_t mask, int16_t *base, vuint16m2_t bindex,
-                             vint16m2_t value, size_t vl) {
+void test_vsuxei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16m4_m(vbool4_t mask, int16_t *base, vuint16m4_t bindex,
-                             vint16m4_t value, size_t vl) {
+void test_vsuxei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei16_v_i16m8_m(vbool2_t mask, int16_t *base, vuint16m8_t bindex,
-                             vint16m8_t value, size_t vl) {
+void test_vsuxei16_v_i16m8_m (vbool2_t mask, int16_t *base, vuint16m8_t bindex, vint16m8_t value, size_t vl) {
  return vsuxei16(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i16mf4_m(vbool64_t mask, int16_t *base,
-                              vuint32mf2_t bindex, vint16mf4_t value,
-                              size_t vl) {
+void test_vsuxei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint32m1_t bindex,
-                              vint16mf2_t value, size_t vl) {
+void test_vsuxei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i16m1_m(vbool16_t mask, int16_t *base, vuint32m2_t bindex,
-                             vint16m1_t value, size_t vl) {
+void test_vsuxei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i16m2_m(vbool8_t mask, int16_t *base, vuint32m4_t bindex,
-                             vint16m2_t value, size_t vl) {
+void test_vsuxei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei32_v_i16m4_m(vbool4_t mask, int16_t *base, vuint32m8_t bindex,
-                             vint16m4_t value, size_t vl) {
+void test_vsuxei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t value, size_t vl) {
  return vsuxei32(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i16mf4_m(vbool64_t mask, int16_t *base, vuint64m1_t bindex,
-                              vint16mf4_t value, size_t vl) {
+void test_vsuxei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsuxei64_v_i16mf2_m(vbool32_t mask, int16_t *base, vuint64m2_t bindex,
-                              vint16mf2_t value, size_t vl) {
+void test_vsuxei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t value, size_t vl) {
  return vsuxei64(mask, base, bindex, value, vl);
}
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m1_m(vbool16_t mask, int16_t *base, vuint64m4_t bindex, - vint16m1_t value, size_t vl) { +void test_vsuxei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i16m2_m(vbool8_t mask, int16_t *base, vuint64m8_t bindex, - vint16m2_t value, size_t vl) { +void test_vsuxei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint8mf8_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsuxei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m1_m(vbool32_t mask, int32_t *base, vuint8mf4_t bindex, - vint32m1_t value, size_t vl) { +void test_vsuxei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m2_m(vbool16_t mask, int32_t *base, vuint8mf2_t bindex, - vint32m2_t value, size_t vl) { +void test_vsuxei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m4_m(vbool8_t mask, int32_t *base, vuint8m1_t bindex, - vint32m4_t value, size_t vl) { +void test_vsuxei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_i32m8_m(vbool4_t mask, int32_t *base, vuint8m2_t bindex, - vint32m8_t value, size_t vl) { +void test_vsuxei8_v_i32m8_m (vbool4_t mask, int32_t *base, vuint8m2_t bindex, vint32m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint16mf4_t bindex, vint32mf2_t value, - size_t vl) { +void test_vsuxei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m1_m(vbool32_t mask, int32_t *base, vuint16mf2_t bindex, - vint32m1_t value, size_t vl) { +void test_vsuxei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m2_m(vbool16_t mask, int32_t *base, vuint16m1_t bindex, - vint32m2_t value, size_t vl) { +void test_vsuxei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m4_m(vbool8_t mask, int32_t *base, vuint16m2_t bindex, - vint32m4_t value, size_t vl) { +void test_vsuxei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_i32m8_m(vbool4_t mask, int32_t *base, vuint16m4_t bindex, - vint32m8_t value, size_t vl) { +void test_vsuxei16_v_i32m8_m (vbool4_t mask, int32_t *base, vuint16m4_t bindex, vint32m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32mf2_m(vbool64_t mask, int32_t *base, - vuint32mf2_t bindex, vint32mf2_t value, - size_t vl) { +void test_vsuxei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m1_m(vbool32_t mask, int32_t *base, vuint32m1_t bindex, - vint32m1_t value, size_t vl) { +void test_vsuxei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m2_m(vbool16_t mask, int32_t *base, vuint32m2_t bindex, - vint32m2_t value, size_t vl) { +void test_vsuxei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m4_m(vbool8_t mask, int32_t *base, vuint32m4_t bindex, - vint32m4_t value, size_t vl) { +void test_vsuxei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_i32m8_m(vbool4_t mask, int32_t *base, vuint32m8_t bindex, - vint32m8_t value, size_t vl) { +void test_vsuxei32_v_i32m8_m (vbool4_t mask, int32_t *base, vuint32m8_t bindex, vint32m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_i32mf2_m(vbool64_t mask, int32_t *base, vuint64m1_t bindex, - vint32mf2_t value, size_t vl) { +void test_vsuxei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, 
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i32m1_m(vbool32_t mask, int32_t *base, vuint64m2_t bindex,
-                             vint32m1_t value, size_t vl) {
+void test_vsuxei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i32m2_m(vbool16_t mask, int32_t *base, vuint64m4_t bindex,
-                             vint32m2_t value, size_t vl) {
+void test_vsuxei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i32m4_m(vbool8_t mask, int32_t *base, vuint64m8_t bindex,
-                             vint32m4_t value, size_t vl) {
+void test_vsuxei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_i64m1_m(vbool64_t mask, int64_t *base, vuint8mf8_t bindex,
-                            vint64m1_t value, size_t vl) {
+void test_vsuxei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_i64m2_m(vbool32_t mask, int64_t *base, vuint8mf4_t bindex,
-                            vint64m2_t value, size_t vl) {
+void test_vsuxei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_i64m4_m(vbool16_t mask, int64_t *base, vuint8mf2_t bindex,
-                            vint64m4_t value, size_t vl) {
+void test_vsuxei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_i64m8_m(vbool8_t mask, int64_t *base, vuint8m1_t bindex,
-                            vint64m8_t value, size_t vl) {
+void test_vsuxei8_v_i64m8_m (vbool8_t mask, int64_t *base, vuint8m1_t bindex, vint64m8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_i64m1_m(vbool64_t mask, int64_t *base, vuint16mf4_t bindex,
-                             vint64m1_t value, size_t vl) {
+void test_vsuxei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_i64m2_m(vbool32_t mask, int64_t *base, vuint16mf2_t bindex,
-                             vint64m2_t value, size_t vl) {
+void test_vsuxei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_i64m4_m(vbool16_t mask, int64_t *base, vuint16m1_t bindex,
-                             vint64m4_t value, size_t vl) {
+void test_vsuxei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_i64m8_m(vbool8_t mask, int64_t *base, vuint16m2_t bindex,
-                             vint64m8_t value, size_t vl) {
+void test_vsuxei16_v_i64m8_m (vbool8_t mask, int64_t *base, vuint16m2_t bindex, vint64m8_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_i64m1_m(vbool64_t mask, int64_t *base, vuint32mf2_t bindex,
-                             vint64m1_t value, size_t vl) {
+void test_vsuxei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_i64m2_m(vbool32_t mask, int64_t *base, vuint32m1_t bindex,
-                             vint64m2_t value, size_t vl) {
+void test_vsuxei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_i64m4_m(vbool16_t mask, int64_t *base, vuint32m2_t bindex,
-                             vint64m4_t value, size_t vl) {
+void test_vsuxei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_i64m8_m(vbool8_t mask, int64_t *base, vuint32m4_t bindex,
-                             vint64m8_t value, size_t vl) {
+void test_vsuxei32_v_i64m8_m (vbool8_t mask, int64_t *base, vuint32m4_t bindex, vint64m8_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i64m1_m(vbool64_t mask, int64_t *base, vuint64m1_t bindex,
-                             vint64m1_t value, size_t vl) {
+void test_vsuxei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i64m2_m(vbool32_t mask, int64_t *base, vuint64m2_t bindex,
-                             vint64m2_t value, size_t vl) {
+void test_vsuxei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i64m4_m(vbool16_t mask, int64_t *base, vuint64m4_t bindex,
-                             vint64m4_t value, size_t vl) {
+void test_vsuxei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_i64m8_m(vbool8_t mask, int64_t *base, vuint64m8_t bindex,
-                             vint64m8_t value, size_t vl) {
+void test_vsuxei64_v_i64m8_m (vbool8_t mask, int64_t *base, vuint64m8_t bindex, vint64m8_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t bindex,
-                            vuint8mf8_t value, size_t vl) {
+void test_vsuxei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t bindex,
-                            vuint8mf4_t value, size_t vl) {
+void test_vsuxei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t bindex,
-                            vuint8mf2_t value, size_t vl) {
+void test_vsuxei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t bindex,
-                           vuint8m1_t value, size_t vl) {
+void test_vsuxei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t bindex,
-                           vuint8m2_t value, size_t vl) {
+void test_vsuxei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t bindex,
-                           vuint8m4_t value, size_t vl) {
+void test_vsuxei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t bindex,
-                           vuint8m8_t value, size_t vl) {
+void test_vsuxei8_v_u8m8_m (vbool1_t mask, uint8_t *base, vuint8m8_t bindex, vuint8m8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint16mf4_t bindex,
-                             vuint8mf8_t value, size_t vl) {
+void test_vsuxei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint16mf2_t bindex,
-                             vuint8mf4_t value, size_t vl) {
+void test_vsuxei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint16m1_t bindex,
-                             vuint8mf2_t value, size_t vl) {
+void test_vsuxei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint16m2_t bindex,
-                            vuint8m1_t value, size_t vl) {
+void test_vsuxei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint16m4_t bindex,
-                            vuint8m2_t value, size_t vl) {
+void test_vsuxei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint16m8_t bindex,
-                            vuint8m4_t value, size_t vl) {
+void test_vsuxei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint32mf2_t bindex,
-                             vuint8mf8_t value, size_t vl) {
+void test_vsuxei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint32m1_t bindex,
-                             vuint8mf4_t value, size_t vl) {
+void test_vsuxei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint32m2_t bindex,
-                             vuint8mf2_t value, size_t vl) {
+void test_vsuxei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint32m4_t bindex,
-                            vuint8m1_t value, size_t vl) {
+void test_vsuxei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint32m8_t bindex,
-                            vuint8m2_t value, size_t vl) {
+void test_vsuxei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint64m1_t bindex,
-                             vuint8mf8_t value, size_t vl) {
+void test_vsuxei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint64m2_t bindex,
-                             vuint8mf4_t value, size_t vl) {
+void test_vsuxei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint64m4_t bindex,
-                             vuint8mf2_t value, size_t vl) {
+void test_vsuxei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint64m8_t bindex,
-                            vuint8m1_t value, size_t vl) {
+void test_vsuxei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint8mf8_t bindex,
-                             vuint16mf4_t value, size_t vl) {
+void test_vsuxei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint8mf4_t bindex,
-                             vuint16mf2_t value, size_t vl) {
+void test_vsuxei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint8mf2_t bindex,
-                            vuint16m1_t value, size_t vl) {
+void test_vsuxei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint8m1_t bindex,
-                            vuint16m2_t value, size_t vl) {
+void test_vsuxei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint8m2_t bindex,
-                            vuint16m4_t value, size_t vl) {
+void test_vsuxei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint8m4_t bindex,
-                            vuint16m8_t value, size_t vl) {
+void test_vsuxei8_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint8m4_t bindex, vuint16m8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16mf4_m(vbool64_t mask, uint16_t *base,
-                             vuint16mf4_t bindex, vuint16mf4_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16mf2_m(vbool32_t mask, uint16_t *base,
-                             vuint16mf2_t bindex, vuint16mf2_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t bindex,
-                             vuint16m1_t value, size_t vl) {
+void test_vsuxei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t bindex,
-                             vuint16m2_t value, size_t vl) {
+void test_vsuxei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t bindex,
-                             vuint16m4_t value, size_t vl) {
+void test_vsuxei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t bindex,
-                             vuint16m8_t value, size_t vl) {
+void test_vsuxei16_v_u16m8_m (vbool2_t mask, uint16_t *base, vuint16m8_t bindex, vuint16m8_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u16mf4_m(vbool64_t mask, uint16_t *base,
-                             vuint32mf2_t bindex, vuint16mf4_t value,
-                             size_t vl) {
+void test_vsuxei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u16mf2_m(vbool32_t mask, uint16_t *base,
-                             vuint32m1_t bindex, vuint16mf2_t value,
-                             size_t vl) {
+void test_vsuxei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint32m2_t bindex,
-                             vuint16m1_t value, size_t vl) {
+void test_vsuxei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint32m4_t bindex,
-                             vuint16m2_t value, size_t vl) {
+void test_vsuxei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint32m8_t bindex,
-                             vuint16m4_t value, size_t vl) {
+void test_vsuxei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u16mf4_m(vbool64_t mask, uint16_t *base,
-                             vuint64m1_t bindex, vuint16mf4_t value,
-                             size_t vl) {
+void test_vsuxei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u16mf2_m(vbool32_t mask, uint16_t *base,
-                             vuint64m2_t bindex, vuint16mf2_t value,
-                             size_t vl) {
+void test_vsuxei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint64m4_t bindex,
-                             vuint16m1_t value, size_t vl) {
+void test_vsuxei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint64m8_t bindex,
-                             vuint16m2_t value, size_t vl) {
+void test_vsuxei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint8mf8_t bindex,
-                             vuint32mf2_t value, size_t vl) {
+void test_vsuxei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint8mf4_t bindex,
-                            vuint32m1_t value, size_t vl) {
+void test_vsuxei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint8mf2_t bindex,
-                            vuint32m2_t value, size_t vl) {
+void test_vsuxei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint8m1_t bindex,
-                            vuint32m4_t value, size_t vl) {
+void test_vsuxei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint8m2_t bindex,
-                            vuint32m8_t value, size_t vl) {
+void test_vsuxei8_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint8m2_t bindex, vuint32m8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                             vuint16mf4_t bindex, vuint32mf2_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u32m1_m(vbool32_t mask, uint32_t *base,
-                             vuint16mf2_t bindex, vuint32m1_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint16m1_t bindex,
-                             vuint32m2_t value, size_t vl) {
+void test_vsuxei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint16m2_t bindex,
-                             vuint32m4_t value, size_t vl) {
+void test_vsuxei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint16m4_t bindex,
-                             vuint32m8_t value, size_t vl) {
+void test_vsuxei16_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint16m4_t bindex, vuint32m8_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                             vuint32mf2_t bindex, vuint32mf2_t value,
-                             size_t vl) {
+void test_vsuxei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t bindex,
-                             vuint32m1_t value, size_t vl) {
+void test_vsuxei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t bindex,
-                             vuint32m2_t value, size_t vl) {
+void test_vsuxei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t bindex,
-                             vuint32m4_t value, size_t vl) {
+void test_vsuxei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t bindex,
-                             vuint32m8_t value, size_t vl) {
+void test_vsuxei32_v_u32m8_m (vbool4_t mask, uint32_t *base, vuint32m8_t bindex, vuint32m8_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u32mf2_m(vbool64_t mask, uint32_t *base,
-                             vuint64m1_t bindex, vuint32mf2_t value,
-                             size_t vl) {
+void test_vsuxei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint64m2_t bindex,
-                             vuint32m1_t value, size_t vl) {
+void test_vsuxei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint64m4_t bindex,
-                             vuint32m2_t value, size_t vl) {
+void test_vsuxei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint64m8_t bindex,
-                             vuint32m4_t value, size_t vl) {
+void test_vsuxei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint8mf8_t bindex,
-                            vuint64m1_t value, size_t vl) {
+void test_vsuxei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint8mf4_t bindex,
-                            vuint64m2_t value, size_t vl) {
+void test_vsuxei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint8mf2_t bindex,
-                            vuint64m4_t value, size_t vl) {
+void test_vsuxei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei8_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint8m1_t bindex,
-                            vuint64m8_t value, size_t vl) {
+void test_vsuxei8_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint8m1_t bindex, vuint64m8_t value, size_t vl) {
   return vsuxei8(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                             vuint16mf4_t bindex, vuint64m1_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u64m2_m(vbool32_t mask, uint64_t *base,
-                             vuint16mf2_t bindex, vuint64m2_t value,
-                             size_t vl) {
+void test_vsuxei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint16m1_t bindex,
-                             vuint64m4_t value, size_t vl) {
+void test_vsuxei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei16_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint16m2_t bindex,
-                             vuint64m8_t value, size_t vl) {
+void test_vsuxei16_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint16m2_t bindex, vuint64m8_t value, size_t vl) {
   return vsuxei16(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u64m1_m(vbool64_t mask, uint64_t *base,
-                             vuint32mf2_t bindex, vuint64m1_t value,
-                             size_t vl) {
+void test_vsuxei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint32m1_t bindex,
-                             vuint64m2_t value, size_t vl) {
+void test_vsuxei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint32m2_t bindex,
-                             vuint64m4_t value, size_t vl) {
+void test_vsuxei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei32_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint32m4_t bindex,
-                             vuint64m8_t value, size_t vl) {
+void test_vsuxei32_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint32m4_t bindex, vuint64m8_t value, size_t vl) {
   return vsuxei32(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
-void test_vsuxei64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t bindex,
-                             vuint64m1_t value, size_t vl) {
+void test_vsuxei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t value, size_t vl) {
   return vsuxei64(mask, base, bindex, value, vl);
 }
-//
// CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
// CHECK-RV64-NEXT:    call void
@llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t bindex, - vuint64m2_t value, size_t vl) { +void test_vsuxei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t bindex, - vuint64m4_t value, size_t vl) { +void test_vsuxei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, - vuint64m8_t value, size_t vl) { +void test_vsuxei64_v_u64m8_m (vbool8_t mask, uint64_t *base, vuint64m8_t bindex, vuint64m8_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t 
value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei8(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); 
+} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei16(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei32(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m( +//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei64(mask, base, bindex, value, vl); +} + // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsuxei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, - vfloat32m1_t value, size_t vl) { +void test_vsuxei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, - vfloat32m2_t value, size_t vl) { +void test_vsuxei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret
void // -void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, - vfloat32m4_t value, size_t vl) { +void test_vsuxei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, - vfloat32m8_t value, size_t vl) { +void test_vsuxei8_v_f32m8_m (vbool4_t mask, float *base, vuint8m2_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsuxei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, - vfloat32m1_t value, size_t vl) { +void test_vsuxei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, - vfloat32m2_t value, size_t vl) { +void test_vsuxei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, - vfloat32m4_t value, size_t vl) { +void test_vsuxei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, - vfloat32m8_t value, size_t vl) { +void test_vsuxei16_v_f32m8_m (vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32mf2_m(vbool64_t mask, float *base, vuint32mf2_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsuxei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m1_m(vbool32_t mask, float *base, vuint32m1_t bindex, - vfloat32m1_t value, size_t vl) { +void test_vsuxei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m2_m(vbool16_t mask, float *base, vuint32m2_t bindex, - vfloat32m2_t value, size_t vl) { +void test_vsuxei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m4_m(vbool8_t mask, float *base, vuint32m4_t bindex, - vfloat32m4_t value, size_t vl) { +void test_vsuxei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f32m8_m(vbool4_t mask, float *base, vuint32m8_t bindex, - vfloat32m8_t value, size_t vl) { +void test_vsuxei32_v_f32m8_m (vbool4_t mask, float *base, vuint32m8_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32mf2_m(vbool64_t mask, float *base, vuint64m1_t bindex, - vfloat32mf2_t value, size_t vl) { +void test_vsuxei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m1_m(vbool32_t mask, float *base, vuint64m2_t bindex, - vfloat32m1_t value, size_t vl) { +void test_vsuxei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m2_m(vbool16_t mask, float *base, vuint64m4_t bindex, - vfloat32m2_t value, size_t vl) { +void test_vsuxei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f32m4_m(vbool8_t mask, float *base, vuint64m8_t bindex, - vfloat32m4_t value, size_t vl) { +void test_vsuxei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m1_m(vbool64_t mask, double *base, vuint8mf8_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsuxei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m2_m(vbool32_t mask, double *base, vuint8mf4_t bindex, - vfloat64m2_t value, size_t vl) { +void test_vsuxei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m4_m(vbool16_t mask, double *base, vuint8mf2_t bindex, - vfloat64m4_t value, size_t vl) { +void test_vsuxei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f64m8_m(vbool8_t mask, double *base, vuint8m1_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsuxei8_v_f64m8_m (vbool8_t mask, double *base, vuint8m1_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei8(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m1_m(vbool64_t mask, double *base, vuint16mf4_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsuxei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m2_m(vbool32_t mask, double *base, vuint16mf2_t bindex, - vfloat64m2_t value, size_t vl) { +void test_vsuxei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m4_m(vbool16_t mask, double *base, vuint16m1_t bindex, - vfloat64m4_t value, size_t vl) { +void test_vsuxei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f64m8_m(vbool8_t mask, double *base, vuint16m2_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsuxei16_v_f64m8_m (vbool8_t mask, double *base, vuint16m2_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei16(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsuxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m1_m(vbool64_t mask, double *base, vuint32mf2_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsuxei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m2_m(vbool32_t mask, double *base, vuint32m1_t bindex, - vfloat64m2_t value, size_t vl) { +void test_vsuxei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m4_m(vbool16_t mask, double *base, vuint32m2_t bindex, - vfloat64m4_t value, size_t vl) { +void test_vsuxei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei32_v_f64m8_m(vbool8_t mask, double *base, vuint32m4_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsuxei32_v_f64m8_m (vbool8_t mask, double *base, vuint32m4_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei32(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m1_m(vbool64_t mask, double *base, vuint64m1_t bindex, - vfloat64m1_t value, size_t vl) { +void test_vsuxei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m2_m(vbool32_t mask, double *base, vuint64m2_t bindex, - vfloat64m2_t value, size_t vl) { +void test_vsuxei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, - vfloat64m4_t value, size_t vl) { +void test_vsuxei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsuxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei64(mask, base, bindex, value, vl); } +
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c index 62c5076..af01a77 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwadd.c @@ -1212,1350 +1212,1230 @@ vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { return vwaddu_wx(op1, op2, vl); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t
test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, 
op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
-  return vwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
-  return vwadd_wx(mask, maskedoff, op1, op2, vl);
+  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vwadd_vv(mask, maskedoff, op1, op2, vl);
+  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vwadd_vx(mask, maskedoff, op1, op2, vl);
+  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
-  return vwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
-  return vwadd_wx(mask, maskedoff, op1, op2, vl);
+  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vwadd_vv(mask, maskedoff, op1, op2, vl);
+  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vwadd_vx(mask, maskedoff, op1, op2, vl);
+  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
-  return vwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
-  return vwadd_wx(mask, maskedoff, op1, op2, vl);
+  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vwadd_vv(mask, maskedoff, op1, op2, vl);
+  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vwadd_vx(mask, maskedoff, op1, op2, vl);
+  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
-  return vwadd_wv(mask, maskedoff, op1, op2, vl);
+  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
-  return vwadd_wx(mask, maskedoff, op1, op2, vl);
+  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vwadd_vv(mask, maskedoff, op1, op2, vl);
+  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vwadd_vx(mask,
maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwadd_vv(mask, maskedoff, op1, op2, vl); + return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwadd_vx(mask, maskedoff, op1, op2, vl); + return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwadd_wv(mask, maskedoff, op1, op2, vl); + return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { 
- return vwadd_wx(mask, maskedoff, op1, op2, vl); + return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vwaddu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t 
maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return 
vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_wx(mask, maskedoff, op1, op2, vl); + return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_vv(mask, maskedoff, op1, op2, vl); + return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwaddu_vx(mask, maskedoff, op1, op2, vl); + return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwaddu_wv(mask, maskedoff, op1, op2, vl); + return vwaddu_wv(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
- return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_vv(mask, maskedoff, op1, op2, vl);
+ return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
- return vwaddu_vx(mask, maskedoff, op1, op2, vl);
+ return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwaddu_wv(mask, maskedoff, op1, op2, vl);
+ return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwaddu_wx(mask, maskedoff, op1, op2, vl);
+ return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
index 1166db5..94df022 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
@@ -304,303 +304,273 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
 return vwcvtu_x(src, vl);
}
-//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
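// A minimal usage sketch, not generated test content: the masked builtins in
// these tests take the new trailing tail-policy argument, and passing
// VE_TAIL_AGNOSTIC is what produces the extra `i64 1` operand on the
// intrinsic calls in the CHECK lines. Assuming a companion
// VE_TAIL_UNDISTURBED constant (value 0) alongside VE_TAIL_AGNOSTIC, a
// caller that needs the tail elements of `maskedoff` preserved would
// instead write something like:
//
//   vint16mf4_t keep_tail(vbool64_t mask, vint16mf4_t maskedoff,
//                         vint8mf8_t src, size_t vl) {
//     // Hypothetical wrapper: same builtin as the test below, but
//     // requesting the tail-undisturbed policy; this should lower with
//     // `i64 0` rather than `i64 1` as the last intrinsic operand.
//     return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_UNDISTURBED);
//   }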
vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) { - return vwcvt_x(mask, maskedoff, src, vl); + return vwcvt_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) { - return vwcvtu_x(mask, maskedoff, src, vl); + return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) { - return 
vwcvtu_x(mask, maskedoff, src, vl);
+ return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
- return vwcvtu_x(mask, maskedoff, src, vl);
+ return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
- return vwcvtu_x(mask, maskedoff, src, vl);
+ return vwcvtu_x(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
index e985433..e07ef43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmul.c
@@ -904,902 +904,812 @@ vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
 return vwmulsu(op1, op2, vl);
}
-//
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vwmul(mask, maskedoff, op1, op2, vl);
+ return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vwmul(mask, maskedoff, op1, op2, vl);
+ return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
-//
// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t 
mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwmul(mask, maskedoff, op1, op2, vl); + return vwmul(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t 
op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: 
@test_vwmulu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulu(mask, maskedoff, op1, op2, vl); + return vwmulu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t 
test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, 
vint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwmulsu(mask, maskedoff, op1, op2, vl); + return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { - return vwmulsu(mask, 
maskedoff, op1, op2, vl);
+  return vwmulsu(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
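For illustration only, between the per-file hunks: a minimal sketch of what the new trailing argument means for a caller of the overloaded masked intrinsics exercised by these tests. VE_TAIL_AGNOSTIC is the constant the regenerated tests pass (lowered as i64 1 above); VE_TAIL_UNDISTURBED is assumed here to be its counterpart for the tail-undisturbed policy and is not shown in this excerpt.

#include <riscv_vector.h>

// Tail-agnostic: elements past vl may hold any value afterwards.
// This is the policy every regenerated test in this patch passes.
vint16m1_t widen_sub_agnostic(vbool16_t mask, vint16m1_t maskedoff,
                              vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// Tail-undisturbed: elements past vl keep the values supplied in
// maskedoff. VE_TAIL_UNDISTURBED is an assumed name for the 0-valued
// counterpart of VE_TAIL_AGNOSTIC.
vint16m1_t widen_sub_undisturbed(vbool16_t mask, vint16m1_t maskedoff,
                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}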
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
index 7d94594..eb883d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwsub.c
@@ -1212,1350 +1212,1230 @@ vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx(op1, op2, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vwsub_vv(mask, maskedoff, op1, op2, vl);
+  return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+  return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
-  return vwsub_wv(mask, maskedoff, op1, op2, vl);
+  return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
-  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+  return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vwsub_vv(mask, maskedoff, op1, op2, vl);
+  return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vwsub_vx(mask, maskedoff, op1, op2, vl);
+  return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
-  return vwsub_wv(mask, maskedoff, op1, op2, vl);
+  return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
-  return vwsub_wx(mask, maskedoff, op1, op2, vl);
+  return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vwsub_vv(mask, maskedoff, op1, op2, vl);
+  return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 //
CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { - return vwsub_wv(mask, 
maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t 
op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vwsub_vv(mask, maskedoff, op1, op2, vl); + return vwsub_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vwsub_vx(mask, maskedoff, op1, op2, vl); + return vwsub_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { - return vwsub_wv(mask, maskedoff, op1, op2, vl); + return vwsub_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { - return vwsub_wx(mask, maskedoff, op1, op2, vl); + return vwsub_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, 
size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t 
maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, maskedoff, op1, op2, vl); + return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) { - return vwsubu_wv(mask, maskedoff, op1, op2, vl); + return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_wx(mask, maskedoff, op1, op2, vl); + return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vwsubu_vv(mask, maskedoff, op1, op2, vl); + return vwsubu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vwsubu_vx(mask, 
maskedoff, op1, op2, vl);
+ return vwsubu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
- return vwsubu_wv(mask, maskedoff, op1, op2, vl);
+ return vwsubu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
- return vwsubu_wx(mask, maskedoff, op1, op2, vl);
+ return vwsubu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
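
Every masked hunk in these tests changes in lockstep: the builtin call gains a
trailing tail-policy argument, and the expected IR gains a trailing `i64 1`
operand on the matching @llvm.riscv.*.mask intrinsic. A minimal sketch of the
policy constants this implies; the VE_TAIL_UNDISTURBED name and its value 0
are an illustrative assumption, while agnostic == 1 is pinned down by the
CHECK lines:

  /* Sketch only: tail-policy constants consistent with these tests.
     VE_TAIL_AGNOSTIC must expand to 1 to yield the trailing `i64 1`. */
  enum { VE_TAIL_UNDISTURBED = 0, VE_TAIL_AGNOSTIC = 1 };
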
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
index 047093a..5dc40ef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vxor.c
@@ -884,882 +884,794 @@ vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 return vxor(op1, op2, vl);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
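
These vxor hunks follow the same pattern through the overloaded form: the
masked call grows from five arguments to six. As a standalone sketch of what a
caller writes after this patch (hypothetical function name; the signature is
copied from test_vxor_vx_i8m1_m above):

  #include <riscv_vector.h>

  /* Masked, overloaded vxor with an explicit tail policy. VE_TAIL_AGNOSTIC
     corresponds to the trailing `i64 1` operand in the CHECK lines. */
  vint8m1_t xor_scalar_masked(vbool8_t mask, vint8m1_t maskedoff,
                              vint8m1_t op1, int8_t op2, size_t vl) {
    return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
  }
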
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
- return vxor(mask, maskedoff, op1, op2, vl);
+ return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
-//
 // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, 
vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, 
vint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return 
vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { - return vxor(mask, maskedoff, op1, op2, vl); + return vxor(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c index 05a60c5..0d46b81 100644 --- 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c @@ -284,310 +284,282 @@ vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { return vzext_vf2(op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return 
vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return vzext_vf8(mask, maskedoff, op1, vl); + return vzext_vf8(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { - return vzext_vf2(mask, maskedoff, op1, vl); + return vzext_vf2(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return vzext_vf4(mask, maskedoff, op1, vl); + return vzext_vf4(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { return vzext_vf4(mask, maskedoff, op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { return vzext_vf4(mask, maskedoff, op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { return vzext_vf2(mask, maskedoff, op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { return vzext_vf2(mask, maskedoff, op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { return vzext_vf2(mask, maskedoff, op1, vl); } -// // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl)
{ return vzext_vf2(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c index d56e97d..90cde17 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaadd.c @@ -795,910 +795,1591 @@ vuint64m8_t test_vaaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vaadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vaadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vaadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vaadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vaadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vaadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t
test_vaadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vaadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vaadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vaadd_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, 
size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, 
size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vaadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vaadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vaadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vaadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vaadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vaadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vaadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) 
{ return vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vaaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vaaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vaaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vaaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { 
+vuint8mf4_t test_vaaddu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vaaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vaaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vaaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vaaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vaaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vaaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vaaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vaaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vaaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vaaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vaaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vaaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vaaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vaaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vaaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vaaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vaaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vaaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vaaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vaaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vaaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vaaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vaaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vaaddu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vaaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vaaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vaaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vaaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vaaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t 
test_vaaddu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vaaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vaaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vaaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vaaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vaaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vaaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vaaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vaaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vaaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vaaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vaaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vaaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vaaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vaaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vaaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vaaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vaaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vaaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vaaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vaaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vaaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vaaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vaadd_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vaadd_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vaadd_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vaadd_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vaadd_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vaadd_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, 
vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vaadd_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vaadd_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vaadd_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16m1_mt(mask, maskedoff, op1, op2, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vaadd_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vaadd_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vaadd_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vaadd_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vaadd_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vaadd_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vaadd_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vaadd_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vaadd_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vaadd_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vaadd_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m4_t test_vaadd_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vaadd_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vaadd_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaadd_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vaadd_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vaadd_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vaaddu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vaaddu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, 
uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vaaddu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vaaddu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vaaddu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vaaddu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vaaddu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vaaddu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vaaddu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vaaddu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vaaddu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vaaddu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vaaddu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vaaddu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vaaddu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vaaddu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vaaddu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vaaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vaaddu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vaaddu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vaaddu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vaaddu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vaaddu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vaaddu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m2_t test_vaaddu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vaaddu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vaaddu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m4_t test_vaaddu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vaaddu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vaaddu_vv_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vaaddu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vaaddu_vx_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vaaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint64m8_t test_vaaddu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vaaddu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
new file mode 100644
index 0000000..c90e703
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd-policy.c
@@ -0,0 +1,44 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vadd_ta(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
index 09f3d85..568a793 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vadd.c
@@ -1,7 +1,8 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
-// RUN:   -target-feature +experimental-zfh -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -799,7 +800,7 @@ vuint64m8_t test_vadd_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -808,7 +809,7 @@ vint8mf8_t test_vadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
@@ -817,7 +818,7 @@ vint8mf8_t test_vadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -826,7 +827,7 @@ vint8mf4_t test_vadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
@@ -835,7 +836,7 @@ vint8mf4_t test_vadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -844,7 +845,7 @@ vint8mf2_t test_vadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
@@ -853,7 +854,7 @@ vint8mf2_t test_vadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -862,7 +863,7 @@ vint8m1_t test_vadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
@@ -871,7 +872,7 @@ vint8m1_t test_vadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -880,7 +881,7 @@ vint8m2_t test_vadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
@@ -889,7 +890,7 @@ vint8m2_t test_vadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -898,7 +899,7 @@ vint8m4_t test_vadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
@@ -907,7 +908,7 @@ vint8m4_t test_vadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -916,7 +917,7 @@ vint8m8_t test_vadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
@@ -925,7 +926,7 @@ vint8m8_t test_vadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -934,7 +935,7 @@ vint16mf4_t test_vadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
@@ -943,7 +944,7 @@ vint16mf4_t test_vadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16m
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -952,7 +953,7 @@ vint16mf2_t test_vadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 
 // CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
@@ -961,7 +962,7 @@ vint16mf2_t test_vadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16m
 
 // CHECK-RV64-LABEL: @test_vadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -970,7 +971,7 @@ vint16m1_t test_vadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t
 
 //
CHECK-RV64-LABEL: @test_vadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { @@ -979,7 +980,7 @@ vint16m1_t test_vadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t // CHECK-RV64-LABEL: @test_vadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { @@ -988,7 +989,7 @@ vint16m2_t test_vadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-LABEL: @test_vadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { @@ -997,7 +998,7 @@ vint16m2_t test_vadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t // CHECK-RV64-LABEL: @test_vadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { @@ -1006,7 +1007,7 @@ vint16m4_t test_vadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-LABEL: @test_vadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { @@ -1015,7 +1016,7 @@ vint16m4_t test_vadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t // CHECK-RV64-LABEL: @test_vadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { @@ -1024,7 +1025,7 @@ vint16m8_t test_vadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-LABEL: @test_vadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { @@ -1033,7 +1034,7 @@ vint16m8_t test_vadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t // CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { @@ -1042,7 +1043,7 @@ vint32mf2_t test_vadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { @@ -1051,7 +1052,7 @@ vint32mf2_t test_vadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32m // CHECK-RV64-LABEL: @test_vadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { @@ -1060,7 +1061,7 @@ vint32m1_t test_vadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-LABEL: @test_vadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { @@ -1069,7 +1070,7 @@ vint32m1_t test_vadd_vx_i32m1_m(vbool32_t mask, 
vint32m1_t maskedoff, vint32m1_t // CHECK-RV64-LABEL: @test_vadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { @@ -1078,7 +1079,7 @@ vint32m2_t test_vadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-LABEL: @test_vadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { @@ -1087,7 +1088,7 @@ vint32m2_t test_vadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t // CHECK-RV64-LABEL: @test_vadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { @@ -1096,7 +1097,7 @@ vint32m4_t test_vadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-LABEL: @test_vadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { @@ -1105,7 +1106,7 @@ vint32m4_t test_vadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t // CHECK-RV64-LABEL: @test_vadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { @@ -1114,7 +1115,7 @@ vint32m8_t test_vadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-LABEL: @test_vadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { @@ -1123,7 +1124,7 @@ vint32m8_t test_vadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t // CHECK-RV64-LABEL: @test_vadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { @@ -1132,7 +1133,7 @@ vint64m1_t test_vadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-LABEL: @test_vadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { @@ -1141,7 +1142,7 @@ vint64m1_t test_vadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t // CHECK-RV64-LABEL: @test_vadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { @@ -1150,7 +1151,7 @@ vint64m2_t test_vadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-LABEL: @test_vadd_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { @@ -1159,7 +1160,7 @@ vint64m2_t test_vadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t // CHECK-RV64-LABEL: @test_vadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { @@ -1168,7 +1169,7 @@ vint64m4_t 
test_vadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-LABEL: @test_vadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { @@ -1177,7 +1178,7 @@ vint64m4_t test_vadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-LABEL: @test_vadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { @@ -1186,7 +1187,7 @@ vint64m8_t test_vadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-LABEL: @test_vadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { @@ -1195,7 +1196,7 @@ vint64m8_t test_vadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t // CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { @@ -1204,7 +1205,7 @@ vuint8mf8_t test_vadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { @@ -1213,7 +1214,7 @@ vuint8mf8_t test_vadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { @@ -1222,7 +1223,7 @@ vuint8mf4_t test_vadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { @@ -1231,7 +1232,7 @@ vuint8mf4_t test_vadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { @@ -1240,7 +1241,7 @@ vuint8mf2_t test_vadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { @@ -1249,7 +1250,7 @@ vuint8mf2_t test_vadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf // CHECK-RV64-LABEL: @test_vadd_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { @@ -1258,7 +1259,7 @@ vuint8m1_t test_vadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // CHECK-RV64-LABEL: @test_vadd_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m1_t test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { @@ -1267,7 +1268,7 @@ vuint8m1_t 
test_vadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t o // CHECK-RV64-LABEL: @test_vadd_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { @@ -1276,7 +1277,7 @@ vuint8m2_t test_vadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // CHECK-RV64-LABEL: @test_vadd_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { @@ -1285,7 +1286,7 @@ vuint8m2_t test_vadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t o // CHECK-RV64-LABEL: @test_vadd_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { @@ -1294,7 +1295,7 @@ vuint8m4_t test_vadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // CHECK-RV64-LABEL: @test_vadd_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { @@ -1303,7 +1304,7 @@ vuint8m4_t test_vadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t o // CHECK-RV64-LABEL: @test_vadd_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { @@ -1312,7 +1313,7 @@ vuint8m8_t test_vadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // CHECK-RV64-LABEL: @test_vadd_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { @@ -1321,7 +1322,7 @@ vuint8m8_t test_vadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t o // CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { @@ -1330,7 +1331,7 @@ vuint16mf4_t test_vadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { @@ -1339,7 +1340,7 @@ vuint16mf4_t test_vadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { @@ -1348,7 +1349,7 @@ vuint16mf2_t test_vadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { @@ -1357,7 +1358,7 @@ vuint16mf2_t test_vadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t 
vl) { @@ -1366,7 +1367,7 @@ vuint16m1_t test_vadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // CHECK-RV64-LABEL: @test_vadd_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { @@ -1375,7 +1376,7 @@ vuint16m1_t test_vadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m // CHECK-RV64-LABEL: @test_vadd_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { @@ -1384,7 +1385,7 @@ vuint16m2_t test_vadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // CHECK-RV64-LABEL: @test_vadd_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { @@ -1393,7 +1394,7 @@ vuint16m2_t test_vadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2 // CHECK-RV64-LABEL: @test_vadd_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { @@ -1402,7 +1403,7 @@ vuint16m4_t test_vadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // CHECK-RV64-LABEL: @test_vadd_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { @@ -1411,7 +1412,7 @@ vuint16m4_t test_vadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4 // CHECK-RV64-LABEL: @test_vadd_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { @@ -1420,7 +1421,7 @@ vuint16m8_t test_vadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // CHECK-RV64-LABEL: @test_vadd_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { @@ -1429,7 +1430,7 @@ vuint16m8_t test_vadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8 // CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { @@ -1438,7 +1439,7 @@ vuint32mf2_t test_vadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { @@ -1447,7 +1448,7 @@ vuint32mf2_t test_vadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint // CHECK-RV64-LABEL: @test_vadd_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { @@ -1456,7 +1457,7 @@ vuint32m1_t test_vadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // CHECK-RV64-LABEL: @test_vadd_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t 
test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { @@ -1465,7 +1466,7 @@ vuint32m1_t test_vadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m // CHECK-RV64-LABEL: @test_vadd_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { @@ -1474,7 +1475,7 @@ vuint32m2_t test_vadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // CHECK-RV64-LABEL: @test_vadd_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { @@ -1483,7 +1484,7 @@ vuint32m2_t test_vadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m // CHECK-RV64-LABEL: @test_vadd_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { @@ -1492,7 +1493,7 @@ vuint32m4_t test_vadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // CHECK-RV64-LABEL: @test_vadd_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { @@ -1501,7 +1502,7 @@ vuint32m4_t test_vadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4 // CHECK-RV64-LABEL: @test_vadd_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { @@ -1510,7 +1511,7 @@ vuint32m8_t test_vadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // CHECK-RV64-LABEL: @test_vadd_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { @@ -1519,7 +1520,7 @@ vuint32m8_t test_vadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8 // CHECK-RV64-LABEL: @test_vadd_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { @@ -1528,7 +1529,7 @@ vuint64m1_t test_vadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { @@ -1537,7 +1538,7 @@ vuint64m1_t test_vadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { @@ -1546,7 +1547,7 @@ vuint64m2_t test_vadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { @@ -1555,7 +1556,7 @@ vuint64m2_t test_vadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { @@ -1564,7 +1565,7 @@ vuint64m4_t test_vadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { @@ -1573,7 +1574,7 @@ vuint64m4_t test_vadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vadd_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { @@ -1582,10 +1583,802 @@ vuint64m8_t test_vadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8 // CHECK-RV64-LABEL: @test_vadd_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vadd_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vv_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vadd_vx_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vv_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, 
size_t vl, uint8_t ta) { + return vadd_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vadd_vx_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vv_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vadd_vx_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vv_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vadd_vx_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vv_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vadd_vx_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vv_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vadd_vx_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vv_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vadd_vx_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vadd_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t 
vl, uint8_t ta) { + return vadd_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vadd_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vadd_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vv_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vadd_vx_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t 
test_vadd_vv_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vadd_vx_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vv_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vadd_vx_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vv_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vadd_vx_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vv_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vadd_vx_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8m2_mt(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vv_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vadd_vx_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vv_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vadd_vx_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vadd_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vadd_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vadd_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vadd_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vadd_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vadd_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vadd_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vadd_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vadd_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vadd_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vadd_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vadd_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vadd_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vadd_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vadd_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vadd_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vadd_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + 
  return vadd_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vadd_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vadd_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vadd_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vadd_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vadd_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vadd_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
index 2e05ccb..3e39af2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vand.c
@@ -792,798 +792,1591 @@ vuint64m8_t test_vand_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vand_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vand_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vand_vx_u64m8(op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vand_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]],
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vand_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vand_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vand_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vand_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vand_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vand_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vand_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vand_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vand_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vand_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vand_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vand_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vand_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vand_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vand_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vand_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vand_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vand_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vand_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vand_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vand_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vand_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vand_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vand_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vand_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vand_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vand_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vand_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vand_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vand_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vand_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vand_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vand_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vand_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vand_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vand_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vand_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vand_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vand_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vand_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vand_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vand_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vand_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vand_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vand_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vand_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vand_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vand_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vand_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vand_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vand_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vand_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vand_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vand_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vand_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vand_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vand_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vand_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vand_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vand_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vand_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vand_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vand_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vand_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vand_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vand_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vand_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vand_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vand_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vand_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vand_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vand_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vand_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vand_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vand_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vand_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vand_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vand_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vand_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vand_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vand_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vand_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vand_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vand_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vand_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vand_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vand_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vand_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vand_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vand_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+vint32m8_t test_vand_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
 return vand_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vand_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+vint64m1_t test_vand_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
 return vand_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vand_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vand_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
 return vand_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vand_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vand_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
 return vand_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vand_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vand_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
 return vand_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vand_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vand_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
 return vand_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vand_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vand_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
 return vand_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vand_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vand_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
 return vand_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vand_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vand_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
 return vand_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vand_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vand_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
 return vand_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL:
@test_vand_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vand_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vand_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vand_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vand_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
 return vand_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vand_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vand_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vand_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vand_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
 return vand_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vand_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vand_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8mf2_m(mask,
maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vand_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vand_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 return vand_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vand_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vand_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vand_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vand_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 return vand_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vand_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vand_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vand_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vand_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 return vand_vv_u8m4_m(mask,
maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vand_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vand_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vand_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vand_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
 return vand_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vand_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vand_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
 return vand_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vand_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vand_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
 return vand_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vand_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vand_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1,
uint16_t op2, size_t vl) {
 return vand_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vand_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vand_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 return vand_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vand_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vand_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
 return vand_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vand_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vand_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 return vand_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vand_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vand_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
 return vand_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vand_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2,
size_t vl) {
+vuint16m2_t test_vand_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 return vand_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vand_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vand_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
 return vand_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vand_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vand_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 return vand_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vand_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vand_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
 return vand_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vand_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vand_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
 return vand_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t
test_vand_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vand_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
 return vand_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vand_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vand_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 return vand_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vand_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vand_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
 return vand_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vand_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vand_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 return vand_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vand_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vand_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
 return vand_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]],
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vand_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vand_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 return vand_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vand_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vand_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
 return vand_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vand_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vand_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 return vand_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vand_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vand_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
 return vand_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vand_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vand_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
 return vand_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]]
= call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vand_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vand_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
 return vand_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vand_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vand_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
 return vand_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vand_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vand_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
 return vand_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vand_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vand_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
 return vand_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vand_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vand_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
 return vand_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]],
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vand_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vand_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
 return vand_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vand_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vand_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
 return vand_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vand_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vand_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 return vand_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vand_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vand_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vand_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
 return vand_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vand_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vand_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff,
vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vand_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vand_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vand_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vand_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vand_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vand_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vand_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vand_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vand_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vand_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vand_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vand_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vand_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vand_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vand_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vand_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vand_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vand_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vand_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vand_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vand_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vand_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff,
vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vand_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vand_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vand_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vand_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vand_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vand_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vand_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i32m2_mt(mask, maskedoff, op1, op2, vl,
VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vand_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vand_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vand_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vand_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vand_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vand_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vand_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vand_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vand_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vand_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vand_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vand_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vand_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vand_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vand_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vand_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vand_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vand_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vand_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vand_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vand_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vand_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vand_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, 
uint8_t ta) { + return vand_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vand_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vand_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vand_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vand_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vand_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vand_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vand_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vand_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vand_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vand_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vand.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vand_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vand_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vand_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vand_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vand_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vand_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vand_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vand_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vand_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vand_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vand_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vand_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vand_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vand_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vand_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vand_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vand_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vand_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vand_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vand_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vand_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vand_vx_u64m1_mt (vbool64_t mask, 
vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vand_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vand_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+  return vand_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vand_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vand_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vand_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vand_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vand_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vand_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vand_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vand_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vand_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vand.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vand_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vand_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
index 9ce439b..4ade9ef 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vasub.c
@@ -795,910 +795,1591 @@ vuint64m8_t test_vasubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.nxv8i64.i64.i64( [[OP1:%.*]], i64
[[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vasubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vasubu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vasub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vasub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vasub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vasub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vasub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vasub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vasub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vasub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vasub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vasub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vasub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vasub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vasub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vasub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vasub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vasub_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vasub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vasub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vasub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vasub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vasub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vasub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vasub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vasub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vasub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vasub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vasub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vasub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vasub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vasub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vasub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vasub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vasub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vasub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vasub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vasub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vasub_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vasub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vasub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vasub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vasub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vasub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vasub_vx_i32m4_m(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vasub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vasub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vasub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vasub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_m (vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vasub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vasub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vasub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vasub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vasub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t 
op2, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vasub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vasubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vasubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vasubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vasubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vasubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vasubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint8mf2_t test_vasubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vasubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vasubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vasubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vasubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vasubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vasubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vasubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vasubu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vasubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( 
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m2_t test_vasubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                 vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vasubu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vasubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                 vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vasubu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vasubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vasubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                 vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vasubu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vasubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                 vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vasubu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vasubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vasubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                 vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vasubu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vasubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vasubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                     vuint16mf4_t op1, vuint16mf4_t op2,
-                                     size_t vl) {
+vuint16mf4_t test_vasubu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vasubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vasubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                     vuint16mf4_t op1, uint16_t op2,
-                                     size_t vl) {
+vuint16mf4_t test_vasubu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vasubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                     vuint16mf2_t op1, vuint16mf2_t op2,
-                                     size_t vl) {
+vuint16mf2_t test_vasubu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vasubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vasubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                     vuint16mf2_t op1, uint16_t op2,
-                                     size_t vl) {
+vuint16mf2_t test_vasubu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vasubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                   vuint16m1_t op1, vuint16m1_t op2,
-                                   size_t vl) {
+vuint16m1_t test_vasubu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vasubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vasubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                   vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vasubu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vasubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                   vuint16m2_t op1, vuint16m2_t op2,
-                                   size_t vl) {
+vuint16m2_t test_vasubu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vasubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vasubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vasubu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vasubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                   vuint16m4_t op1, vuint16m4_t op2,
-                                   size_t vl) {
+vuint16m4_t test_vasubu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vasubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vasubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vasubu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vasubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                   vuint16m8_t op1, vuint16m8_t op2,
-                                   size_t vl) {
+vuint16m8_t test_vasubu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vasubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vasubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                   vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vasubu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vasubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vasubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                     vuint32mf2_t op1, vuint32mf2_t op2,
-                                     size_t vl) {
+vuint32mf2_t test_vasubu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vasubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vasubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                     vuint32mf2_t op1, uint32_t op2,
-                                     size_t vl) {
+vuint32mf2_t test_vasubu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vasubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                   vuint32m1_t op1, vuint32m1_t op2,
-                                   size_t vl) {
+vuint32m1_t test_vasubu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vasubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vasubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                   vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vasubu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vasubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                   vuint32m2_t op1, vuint32m2_t op2,
-                                   size_t vl) {
+vuint32m2_t test_vasubu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vasubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vasubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                   vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vasubu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vasubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                   vuint32m4_t op1, vuint32m4_t op2,
-                                   size_t vl) {
+vuint32m4_t test_vasubu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vasubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vasubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vasubu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vasubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                   vuint32m8_t op1, vuint32m8_t op2,
-                                   size_t vl) {
+vuint32m8_t test_vasubu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vasubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vasubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                   vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vasubu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vasubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m1_t test_vasubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                   vuint64m1_t op1, vuint64m1_t op2,
-                                   size_t vl) {
+vuint64m1_t test_vasubu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vasubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m1_t test_vasubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                   vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vasubu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m2_t test_vasubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                   vuint64m2_t op1, vuint64m2_t op2,
-                                   size_t vl) {
+vuint64m2_t test_vasubu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vasubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m2_t test_vasubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                   vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vasubu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m4_t test_vasubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                   vuint64m4_t op1, vuint64m4_t op2,
-                                   size_t vl) {
+vuint64m4_t test_vasubu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vasubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m4_t test_vasubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                   vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vasubu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m8_t test_vasubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, vuint64m8_t op2,
-                                   size_t vl) {
+vuint64m8_t test_vasubu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vasubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m8_t test_vasubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vasubu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vasubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vasub_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vasub_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vasub_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vasub_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vasub_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vasub_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vasub_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vasub_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vasub_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vasub_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vasub_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vasub_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vasub_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vasub_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vasub_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vasub_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vasub_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vasub_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vasub_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vasub_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vasub_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vasub_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vasub_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vasub_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vasub_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vasub_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vasub_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vasub_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vasub_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vasub_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vasub_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vasub_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vasub_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vasub_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vasub_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vasub_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vasub_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vasub_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vasub_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vasub_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vasub_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vasub_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vv_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vasub_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vasub_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasub_vx_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vasub_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vasub_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vasubu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vasubu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vasubu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vasubu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vasubu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vasubu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vasubu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vasubu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vasubu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m8_t test_vasubu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m8_t test_vasubu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf4_t test_vasubu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vasubu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vasubu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vasubu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vasubu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vasubu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vasubu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vasubu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m8_t test_vasubu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vx_u16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m8_t test_vasubu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vasubu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vasubu_vv_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vasubu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vasubu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vasubu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vasubu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vasubu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vasubu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vasubu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t 
test_vasubu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vasubu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) { + return vasubu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vasubu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vasubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vasubu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vasubu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c index 9277d8d..9f50112 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vcompress.c @@ -402,6 +402,60 @@ vuint64m8_t test_vcompress_vm_u64m8 (vbool8_t mask, vuint64m8_t dest, vuint64m8_ return vcompress_vm_u64m8(mask, dest, src, vl); } +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vcompress_vm_f16mf4 (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { + return vcompress_vm_f16mf4(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vcompress_vm_f16mf2 (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { + return vcompress_vm_f16mf2(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vcompress_vm_f16m1 (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { + return vcompress_vm_f16m1(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vcompress_vm_f16m2 (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { + return vcompress_vm_f16m2(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { + return vcompress_vm_f16m4(mask, dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vcompress_vm_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { + return vcompress_vm_f16m8(mask, dest, src, vl); +} + // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f32.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) @@ -482,57 +536,3 @@ vfloat64m4_t test_vcompress_vm_f64m4 (vbool16_t mask, vfloat64m4_t dest, vfloat6 vfloat64m8_t test_vcompress_vm_f64m8 (vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) { return vcompress_vm_f64m8(mask, dest, src, vl); } - -// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv1f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vcompress_vm_f16mf4 (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) { - return vcompress_vm_f16mf4(mask, dest, src, vl); -} - -// CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv2f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vcompress_vm_f16mf2 (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) { - return vcompress_vm_f16mf2(mask, dest, src, vl); -} - -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv4f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vcompress_vm_f16m1 (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) { - return vcompress_vm_f16m1(mask, dest, src, vl); -} - -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv8f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vcompress_vm_f16m2 (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) { - return vcompress_vm_f16m2(mask, dest, src, vl); -} - -// CHECK-RV64-LABEL: @test_vcompress_vm_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv16f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vcompress_vm_f16m4 (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) { - return vcompress_vm_f16m4(mask, dest, src, vl); -} - -// CHECK-RV64-LABEL: 
@test_vcompress_vm_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vcompress.nxv32f16.i64( [[DEST:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vcompress_vm_f16m8 (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) { - return vcompress_vm_f16m8(mask, dest, src, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c index 19b99ba..c6d382cc 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vdiv.c @@ -792,798 +792,1591 @@ vuint64m8_t test_vdivu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vdivu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vdivu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vdivu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vdiv_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vdiv_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vdiv_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vdiv_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vdiv_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vdiv_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vdiv_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vdiv_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vdiv_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vdiv_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vdiv_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vdiv_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vdiv_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vdiv_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vdiv_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vdiv_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vdiv_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vdiv_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vdiv_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vdiv_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vdiv_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vdiv_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vdiv_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vdiv_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vdiv_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vdiv_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vdiv_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vdiv_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vdiv_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vdiv_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vdiv_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vdiv_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vdiv_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vdiv_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vdiv_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vdiv_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vdiv_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vdiv_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vdiv_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vdiv_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vdiv_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vdiv_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vdiv_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vdiv_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vdiv_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vdiv_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vdiv_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vdiv_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vdiv_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vdiv_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vdiv_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vdiv_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vdiv_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vdiv_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vdiv_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vdiv_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vdiv_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vdiv_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vdiv_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vdiv_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vdiv_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vdiv_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vdiv_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vdiv_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vdiv_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vdiv_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vdiv_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vdiv_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vdiv_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vdiv_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vdiv_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vdiv_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vdiv_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vdiv_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vdiv_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vdiv_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vdiv_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vdiv_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vdiv_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vdiv_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vdiv_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vdiv_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vdiv_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vdiv_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vdiv_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vdiv_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vdiv_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vdiv_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vdiv_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vdiv_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vdiv_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vdiv_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vdiv_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vdiv_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vdiv_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vdiv_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vdiv_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vdiv_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vdiv_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vdiv_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vdivu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vdivu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vdivu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vdivu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vdivu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vdivu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vdivu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vdivu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vdivu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return 
vdivu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vdivu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vdivu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vdivu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vdivu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vdivu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vdivu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vdivu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vdivu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vdivu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vdivu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vdivu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vdivu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, 
vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vdivu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vdivu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vdivu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
return vdivu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vdivu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vdivu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vdivu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vdivu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vdivu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
return vdivu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vdivu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vdivu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
return vdivu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vdivu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vdivu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
return vdivu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vdivu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vdivu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
return vdivu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vdivu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vdivu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vdivu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vdivu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
return vdivu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vdivu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vdivu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vdivu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vdivu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
return vdivu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vdivu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vdivu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vdivu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vdivu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
return vdivu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vdivu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vdivu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vdivu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vdivu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
return vdivu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vdivu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vdivu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vdivu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vdivu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
return vdivu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vdivu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vdivu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
return vdivu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vdivu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vdivu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vdivu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vdivu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vdivu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vdivu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vdivu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vdivu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
return vdivu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vdivu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vdivu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
return vdivu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vdivu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vdivu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
return vdivu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vdivu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vdivu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
return vdivu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vdivu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vdivu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
return vdivu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vdivu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vdivu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
return vdivu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vdivu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vdivu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
return vdivu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vdivu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vdivu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
return vdivu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vdivu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vdivu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
return vdivu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vdivu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vdivu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
return vdivu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vdivu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vdivu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
return vdivu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vdivu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vdivu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
return vdivu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vdivu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vdivu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
return vdivu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vdivu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vdivu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
return vdivu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vdivu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vdivu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
return vdivu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vdivu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vdivu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
return vdivu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vdiv_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vdiv_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vdiv_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vdiv_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vdiv_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vdiv_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vdiv_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vdiv_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vdiv_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vdiv_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vdiv_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vdiv_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vdiv_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vdiv_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vdiv_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vdiv_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vdiv_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vdiv_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vdiv_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vdiv_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vdiv_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vdiv_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vdiv_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vdiv_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vdiv_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vdiv_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vdiv_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vdiv_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vdiv_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vdiv_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vdiv_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vdiv_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vdiv_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vdiv_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vdiv_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vdiv_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vdiv_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vdiv_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vdiv_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vdiv_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vdiv_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdiv_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vdiv_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+ return vdiv_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+} + +// CHECK-RV64-LABEL: @test_vdiv_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vdiv_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdiv_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdiv.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vdiv_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vdiv_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vdivu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vdivu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vdivu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vdivu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vdivu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vdivu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vdivu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vdivu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vdivu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vdivu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vdivu_vx_u16m1_mt 
(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vdivu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vdivu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vdivu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t 
vl, uint8_t ta) { + return vdivu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vdivu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vdivu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vdivu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vdivu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vdivu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vdivu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vdivu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vdivu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vdivu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) { + return vdivu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vdivu_vx_u64m4_mt( +// 
CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vdivu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vdivu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdivu_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vdivu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+ return vdivu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vdivu_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vdivu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vdivu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vdivu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
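Each _mt test above follows one template: the builtin takes the mask, the maskedoff vector, the source operands, vl, and a trailing tail-policy argument, and that policy is forwarded as the new trailing i64 operand of the masked intrinsic (tail agnostic lowers to i64 1). A minimal caller-side sketch of both policies, assuming riscv_vector.h also defines a VE_TAIL_UNDISTURBED value of 0 as the counterpart of the VE_TAIL_AGNOSTIC macro used in these tests:

#include <riscv_vector.h>

/* Tail undisturbed: lanes of 'maskedoff' past vl survive the divide.
   VE_TAIL_UNDISTURBED is an assumption here; the tests above only
   exercise VE_TAIL_AGNOSTIC, which lowers to the trailing 'i64 1'. */
vuint32m1_t div_keep_tail(vbool32_t mask, vuint32m1_t maskedoff,
                          vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vdivu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}

/* Tail agnostic: lane values past vl are unspecified after the divide. */
vuint32m1_t div_any_tail(vbool32_t mask, vuint32m1_t maskedoff,
                         vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vdivu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}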
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
index 486b4c8..e80e65d2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -89,7 +89,7 @@ vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -98,7 +98,7 @@ vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -107,7 +107,7 @@ vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -116,7 +116,7 @@ vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -125,7 +125,7 @@ vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
 // CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -134,7 +134,7 @@ vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -143,7 +143,7 @@ vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -152,7 +152,7 @@ vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -161,7 +161,7 @@ vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { @@ -275,3 +275,138 @@ vfloat16m4_t test_vfabs_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat vfloat16m8_t test_vfabs_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { return vfabs_v_f16m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfabs_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfabs_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfabs_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfabs_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfabs_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfabs_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfabs_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, 
vfloat64m4_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, uint8_t ta) { + return vfabs_v_f64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c index 1c993d8..31fef44 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfadd.c @@ -278,7 +278,7 @@ vfloat64m8_t test_vfadd_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { @@ -287,7 +287,7 @@ vfloat16mf4_t test_vfadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { @@ -296,7 +296,7 @@ vfloat16mf4_t test_vfadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { @@ -305,7 +305,7 @@ vfloat16mf2_t test_vfadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16mf2_t test_vfadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { @@ -314,7 +314,7 @@ vfloat16mf2_t test_vfadd_vf_f16mf2_m 
(vbool32_t mask, vfloat16mf2_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { @@ -323,7 +323,7 @@ vfloat16m1_t test_vfadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { @@ -332,7 +332,7 @@ vfloat16m1_t test_vfadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { @@ -341,7 +341,7 @@ vfloat16m2_t test_vfadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { @@ -350,7 +350,7 @@ vfloat16m2_t test_vfadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { @@ -359,7 +359,7 @@ vfloat16m4_t test_vfadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { @@ -368,7 +368,7 @@ vfloat16m4_t test_vfadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { @@ -377,7 +377,7 @@ vfloat16m8_t test_vfadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { @@ -386,7 +386,7 @@ vfloat16m8_t test_vfadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { @@ -395,7 +395,7 @@ vfloat32mf2_t test_vfadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { @@ -404,7 +404,7 @@ vfloat32mf2_t test_vfadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, v // CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { @@ -413,7 +413,7 @@ vfloat32m1_t test_vfadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { @@ -422,7 +422,7 @@ vfloat32m1_t test_vfadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { @@ -431,7 +431,7 @@ vfloat32m2_t test_vfadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { @@ -440,7 +440,7 @@ vfloat32m2_t test_vfadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { @@ -449,7 +449,7 @@ vfloat32m4_t test_vfadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { @@ -458,7 +458,7 @@ vfloat32m4_t test_vfadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloa // CHECK-RV64-LABEL: 
@test_vfadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { @@ -467,7 +467,7 @@ vfloat32m8_t test_vfadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { @@ -476,7 +476,7 @@ vfloat32m8_t test_vfadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { @@ -485,7 +485,7 @@ vfloat64m1_t test_vfadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { @@ -494,7 +494,7 @@ vfloat64m1_t test_vfadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { @@ -503,7 +503,7 @@ vfloat64m2_t test_vfadd_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { @@ -512,7 +512,7 @@ vfloat64m2_t test_vfadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { @@ -521,7 +521,7 @@ vfloat64m4_t test_vfadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { @@ -530,7 +530,7 @@ vfloat64m4_t test_vfadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vflo // CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { @@ -539,9 +539,280 @@ vfloat64m8_t test_vfadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloa // CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfadd_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfadd_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfadd_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfadd_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfadd_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfadd_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfadd_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfadd_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfadd_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfadd_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfadd_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfadd_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfadd_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfadd_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfadd_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfadd_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfadd_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfadd_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfadd_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfadd_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfadd_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
+ return vfadd_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
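The vfadd tests mirror the integer pattern: each masked CHECK line gains the trailing i64 1 operand and each _mt builtin passes the policy argument through unchanged. For an unmasked operation that must keep its tail elements, the route described in the commit message (an all-ones mask with the masked builtin) would look roughly like the sketch below; VE_TAIL_UNDISTURBED is assumed as the 0-valued counterpart of VE_TAIL_AGNOSTIC, and vmset_m_b32 is the existing mask-set intrinsic:

#include <riscv_vector.h>

/* "Unmasked" add that still preserves the tail of 'dest': enable every
   lane with an all-ones mask, then request the tail-undisturbed policy. */
vfloat32m1_t add_keep_tail(vfloat32m1_t dest, vfloat32m1_t op1,
                           vfloat32m1_t op2, size_t vl) {
  vbool32_t all_ones = vmset_m_b32(vl);
  return vfadd_vv_f32m1_mt(all_ones, dest, op1, op2, vl, VE_TAIL_UNDISTURBED);
}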
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
index ba84148..4ece643 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c
@@ -6,1678 +6,2433 @@
 #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
+ return vfcvt_x_f_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
+ return vfcvt_x_f_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
+ return vfcvt_x_f_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
+ return vfcvt_x_f_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
+ return vfcvt_x_f_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
+ return vfcvt_x_f_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
+ return vfcvt_rtz_x_f_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
+ return vfcvt_xu_f_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
+ return vfcvt_xu_f_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
+ return vfcvt_xu_f_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
+ return vfcvt_rtz_xu_f_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t
test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { + return vfcvt_xu_f_v_u16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { + return vfcvt_xu_f_v_u16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { + return vfcvt_xu_f_v_u16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) { + return vfcvt_f_x_v_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) { + return vfcvt_f_x_v_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) { + return vfcvt_f_x_v_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) { + return vfcvt_f_x_v_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) { + return vfcvt_f_x_v_f16m4(src, 
vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) { + return vfcvt_f_x_v_f16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) { + return vfcvt_f_xu_v_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) { + return vfcvt_f_xu_v_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) { + return vfcvt_f_xu_v_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) { + return vfcvt_f_xu_v_f16m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_x_f_v_i32mf2 (vfloat32mf2_t src, size_t vl) { + return vfcvt_x_f_v_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2 (vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_x_f_v_i32m1 (vfloat32m1_t src, size_t vl) 
{ + return vfcvt_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1 (vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_x_f_v_i32m2 (vfloat32m2_t src, size_t vl) { + return vfcvt_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2 (vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_x_f_v_i32m4 (vfloat32m4_t src, size_t vl) { + return vfcvt_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4 (vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_x_f_v_i32m8 (vfloat32m8_t src, size_t vl) { + return vfcvt_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8 (vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2 (vfloat32mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2 (vfloat32mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_xu_f_v_u32m1 (vfloat32m1_t src, size_t vl) { + return vfcvt_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1 (vfloat32m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_xu_f_v_u32m2 (vfloat32m2_t src, size_t vl) { + return vfcvt_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2 (vfloat32m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_xu_f_v_u32m4 (vfloat32m4_t src, size_t vl) { + return vfcvt_xu_f_v_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_x_f_v_i32mf2(src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4 (vfloat32m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32mf2(src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8 (vfloat32m8_t src, size_t vl) { + return vfcvt_xu_f_v_u32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_x_f_v_i32m1(src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8 (vfloat32m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m1(src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2 (vint32mf2_t src, size_t vl) { + return vfcvt_f_x_v_f32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_x_f_v_i32m2(src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1 (vint32m1_t src, size_t vl) { + return vfcvt_f_x_v_f32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m2(src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2 (vint32m2_t src, size_t vl) { + return vfcvt_f_x_v_f32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_x_f_v_i32m4(src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4 (vint32m4_t src, size_t vl) { + return vfcvt_f_x_v_f32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m4(src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8 (vint32m8_t src, size_t vl) { + return vfcvt_f_x_v_f32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_x_f_v_i32m8(src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2 (vuint32mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i32m8(src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1 (vuint32m1_t src, size_t vl) { + return vfcvt_f_xu_v_f32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u32mf2(src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2 (vuint32m2_t src, size_t vl) { + return vfcvt_f_xu_v_f32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32mf2(src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4 (vuint32m4_t src, size_t vl) { + return vfcvt_f_xu_v_f32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_xu_f_v_u32m1(src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8 (vuint32m8_t src, size_t vl) { + return vfcvt_f_xu_v_f32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_x_f_v_i64m1 (vfloat64m1_t src, size_t vl) { + return vfcvt_x_f_v_i64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1 (vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_x_f_v_i64m2 (vfloat64m2_t src, size_t vl) { + return vfcvt_x_f_v_i64m2(src, vl); +} + +// 
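// ---------------------------------------------------------------------------
// Editorial sketch (not part of the autogenerated checks): typical use of the
// unmasked conversions exercised above. The unmasked builtins keep their
// (src, vl) signature and stay tail agnostic; vfcvt_x_f_v_* rounds according
// to the dynamic rounding mode in frm, while the _rtz_ variants always
// truncate toward zero. The strip-mining loop, the vsetvl/vle/vse names, and
// the helper name round_and_truncate are assumptions drawn from the same
// rvv-intrinsic-doc naming scheme, not from this patch.
// ---------------------------------------------------------------------------
#include <riscv_vector.h>
#include <stdint.h>

void round_and_truncate(const double *in, int64_t *rounded, int64_t *truncated,
                        size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e64m2(n);             // legal vector length for e64/m2
    vfloat64m2_t v = vle64_v_f64m2(in, vl);  // load up to vl doubles
    vint64m2_t r = vfcvt_x_f_v_i64m2(v, vl);     // rounds per frm
    vint64m2_t t = vfcvt_rtz_x_f_v_i64m2(v, vl); // truncates toward zero
    vse64_v_i64m2(rounded, r, vl);
    vse64_v_i64m2(truncated, t, vl);
    in += vl; rounded += vl; truncated += vl; n -= vl;
  }
}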
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2 (vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_x_f_v_i64m4 (vfloat64m4_t src, size_t vl) { + return vfcvt_x_f_v_i64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4 (vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_x_f_v_i64m8 (vfloat64m8_t src, size_t vl) { + return vfcvt_x_f_v_i64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8 (vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_xu_f_v_u64m1 (vfloat64m1_t src, size_t vl) { + return vfcvt_xu_f_v_u64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1 (vfloat64m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_xu_f_v_u64m2 (vfloat64m2_t src, size_t vl) { + return vfcvt_xu_f_v_u64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2 (vfloat64m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_xu_f_v_u64m4 (vfloat64m4_t src, size_t vl) { + return vfcvt_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4 (vfloat64m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_xu_f_v_u64m8 (vfloat64m8_t src, size_t vl) { + return vfcvt_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8 (vfloat64m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfcvt_f_x_v_f64m1 (vint64m1_t src, size_t vl) { + return vfcvt_f_x_v_f64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfcvt_f_x_v_f64m2 (vint64m2_t src, size_t vl) { + return vfcvt_f_x_v_f64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfcvt_f_x_v_f64m4 (vint64m4_t src, size_t vl) { + return vfcvt_f_x_v_f64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m1(src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8 (vint64m8_t src, size_t vl) { + return vfcvt_f_x_v_f64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_xu_f_v_u32m2(src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1 (vuint64m1_t src, size_t vl) { + return vfcvt_f_xu_v_f64m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
[[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m2(src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2 (vuint64m2_t src, size_t vl) { + return vfcvt_f_xu_v_f64m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_xu_f_v_u32m4(src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4 (vuint64m4_t src, size_t vl) { + return vfcvt_f_xu_v_f64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m4(src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8 (vuint64m8_t src, size_t vl) { + return vfcvt_f_xu_v_f64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_xu_f_v_u32m8(src, vl); +vint16mf4_t test_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u32m8(src, vl); +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { - return vfcvt_f_x_v_f32mf2(src, vl); +vint16mf2_t test_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { - return vfcvt_f_x_v_f32m1(src, vl); +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { - return vfcvt_f_x_v_f32m2(src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { - return vfcvt_f_x_v_f32m4(src, vl); +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { - return vfcvt_f_x_v_f32m8(src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f32mf2(src, vl); +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { - return vfcvt_f_xu_v_f32m1(src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { - return vfcvt_f_xu_v_f32m2(src, vl); +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { - return vfcvt_f_xu_v_f32m4(src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { - return vfcvt_f_xu_v_f32m8(src, vl); +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t 
src, size_t vl) { + return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_x_f_v_i64m1(src, vl); +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m1(src, vl); +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_x_f_v_i64m2(src, vl); +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m2(src, vl); +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_x_f_v_i64m4(src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m4(src, vl); +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_x_f_v_i64m8(src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i64m8(src, vl); +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_xu_f_v_u64m1(src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m1(src, vl); +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_xu_f_v_u64m2(src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m2(src, vl); +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_xu_f_v_u64m4(src, vl); +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { + return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m4(src, vl); +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t 
maskedoff, vint16mf2_t src, size_t vl) { + return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_xu_f_v_u64m8(src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { + return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u64m8(src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { + return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { - return vfcvt_f_x_v_f64m1(src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { + return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { - return vfcvt_f_x_v_f64m2(src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { + return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // 
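// ---------------------------------------------------------------------------
// Editorial sketch (hedged, not part of the autogenerated checks): at the C
// level the masked _m builtins above still take (mask, maskedoff, src, vl);
// the tail policy is supplied by the frontend, which is why every masked
// @llvm.riscv.vfcvt.*.mask.* call in the new CHECK lines carries a trailing
// `i64 1` (tail agnostic) operand. convert_negative_lanes is a hypothetical
// helper, and vmflt_vf_f32m1_b32 is assumed from the same intrinsics spec.
// ---------------------------------------------------------------------------
#include <riscv_vector.h>

vint32m1_t convert_negative_lanes(vint32m1_t maskedoff, vfloat32m1_t src,
                                  size_t vl) {
  vbool32_t mask = vmflt_vf_f32m1_b32(src, 0.0f, vl); // lanes with src < 0.0
  // Active lanes get the converted value, inactive lanes keep maskedoff,
  // and tail lanes are agnostic (the trailing i64 1 in the generated IR).
  return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
}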
-vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { - return vfcvt_f_x_v_f64m4(src, vl); +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { - return vfcvt_f_x_v_f64m8(src, vl); +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { + return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { - return vfcvt_f_xu_v_f64m1(src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { + return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { - return vfcvt_f_xu_v_f64m2(src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { + return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { - return vfcvt_f_xu_v_f64m4(src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { + return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
-  return vfcvt_f_xu_v_f64m8(src, vl);
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
+  return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-    vfloat32mf2_t src, size_t vl) {
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-    vfloat32mf2_t src, size_t vl) {
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-    vfloat32m1_t src, size_t vl) {
+vint32m1_t test_vfcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-    vfloat32m1_t src, size_t vl) {
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-    vfloat32m2_t src, size_t vl) {
+vint32m2_t test_vfcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-    vfloat32m2_t src, size_t vl) {
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-    vfloat32m4_t src, size_t vl) {
+vint32m4_t test_vfcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-    vfloat32m4_t src, size_t vl) {
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-    vfloat32m8_t src, size_t vl) {
+vint32m8_t test_vfcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-    vfloat32m8_t src, size_t vl) {
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-    vfloat32mf2_t src, size_t vl) {
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask,
-    vuint32mf2_t maskedoff,
-    vfloat32mf2_t src, size_t vl) {
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-    vfloat32m1_t src, size_t vl) {
+vuint32m1_t test_vfcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-    vfloat32m1_t src, size_t vl) {
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-    vfloat32m2_t src, size_t vl) {
+vuint32m2_t test_vfcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-    vfloat32m2_t src, size_t vl) {
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-    vfloat32m4_t src, size_t vl) {
+vuint32m4_t test_vfcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-    vfloat32m4_t src, size_t vl) {
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-    vfloat32m8_t src, size_t vl) {
+vuint32m8_t test_vfcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-    vfloat32m8_t src, size_t vl) {
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-    vint32mf2_t src, size_t vl) {
+vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
  return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-    vint32m1_t src, size_t vl) {
+vfloat32m1_t test_vfcvt_f_x_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) {
  return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-    vint32m2_t src, size_t vl) {
+vfloat32m2_t test_vfcvt_f_x_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) {
  return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-    vint32m4_t src, size_t vl) {
+vfloat32m4_t test_vfcvt_f_x_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) {
  return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-    vint32m8_t src, size_t vl) {
+vfloat32m8_t test_vfcvt_f_x_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) {
  return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask,
-    vfloat32mf2_t maskedoff,
-    vuint32mf2_t src, size_t vl) {
+vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
  return vfcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-    vuint32m1_t src, size_t vl) {
+vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) {
  return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-    vuint32m2_t src, size_t vl) {
+vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) {
  return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-    vuint32m4_t src, size_t vl) {
+vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) {
  return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-    vuint32m8_t src, size_t vl) {
+vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) {
  return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-    vfloat64m1_t src, size_t vl) {
+vint64m1_t test_vfcvt_x_f_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
  return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-    vfloat64m1_t src, size_t vl) {
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-    vfloat64m2_t src, size_t vl) {
+vint64m2_t test_vfcvt_x_f_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
  return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-    vfloat64m2_t src, size_t vl) {
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-    vfloat64m4_t src, size_t vl) {
+vint64m4_t test_vfcvt_x_f_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
  return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-    vfloat64m4_t src, size_t vl) {
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-    vfloat64m8_t src, size_t vl) {
+vint64m8_t test_vfcvt_x_f_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
  return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-    vfloat64m8_t src, size_t vl) {
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
  return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-    vfloat64m1_t src, size_t vl) {
+vuint64m1_t test_vfcvt_xu_f_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
  return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-    vfloat64m1_t src, size_t vl) {
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-    vfloat64m2_t src, size_t vl) {
+vuint64m2_t test_vfcvt_xu_f_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
  return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-    vfloat64m2_t src, size_t vl) {
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-    vfloat64m4_t src, size_t vl) {
+vuint64m4_t test_vfcvt_xu_f_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
  return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-    vfloat64m4_t src, size_t vl) {
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-    vfloat64m8_t src, size_t vl) {
+vuint64m8_t test_vfcvt_xu_f_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
  return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-    vfloat64m8_t src, size_t vl) {
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
  return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-    vint64m1_t src, size_t vl) {
+vfloat64m1_t test_vfcvt_f_x_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) {
  return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-    vint64m2_t src, size_t vl) {
+vfloat64m2_t test_vfcvt_f_x_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) {
  return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-    vint64m4_t src, size_t vl) {
+vfloat64m4_t test_vfcvt_f_x_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) {
  return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-    vint64m8_t src, size_t vl) {
+vfloat64m8_t test_vfcvt_f_x_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) {
  return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-    vuint64m1_t src, size_t vl) {
+vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) {
  return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-    vuint64m2_t src, size_t vl) {
+vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) {
  return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-    vuint64m4_t src, size_t vl) {
+vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) {
  return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-    vuint64m8_t src, size_t vl) {
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) {
  return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
-  return vfcvt_x_f_v_i16mf4(src, vl);
+vint16mf4_t test_vfcvt_x_f_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16mf4(src, vl);
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
-  return vfcvt_x_f_v_i16mf2(src, vl);
+vint16mf2_t test_vfcvt_x_f_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16mf2(src, vl);
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
-  return vfcvt_x_f_v_i16m1(src, vl);
+vint16m1_t test_vfcvt_x_f_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16m1(src, vl);
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
-  return vfcvt_x_f_v_i16m2(src, vl);
+vint16m2_t test_vfcvt_x_f_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16m2(src, vl);
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
-  return vfcvt_x_f_v_i16m4(src, vl);
+vint16m4_t test_vfcvt_x_f_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16m4(src, vl);
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
-  return vfcvt_x_f_v_i16m8(src, vl);
+vint16m8_t test_vfcvt_x_f_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
-  return vfcvt_rtz_x_f_v_i16m8(src, vl);
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16mf4(src, vl);
+vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16mf4(src, vl);
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16mf2(src, vl);
+vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16mf2(src, vl);
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16m1(src, vl);
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16m1(src, vl);
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16m2(src, vl);
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16m2(src, vl);
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16m4(src, vl);
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16m4(src, vl);
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
-  return vfcvt_xu_f_v_u16m8(src, vl);
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
-  return vfcvt_rtz_xu_f_v_u16m8(src, vl);
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_x_v_f16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vfcvt_x_f_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vfcvt_x_f_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vfcvt_x_f_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) {
-  return vfcvt_f_x_v_f16mf4(src, vl);
+vint32m4_t test_vfcvt_x_f_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) {
-  return vfcvt_f_x_v_f16mf2(src, vl);
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
+// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) {
-  return vfcvt_f_x_v_f16m1(src, vl);
+vint32m8_t test_vfcvt_x_f_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_x_f_v_i32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) {
-  return vfcvt_f_x_v_f16m2(src, vl);
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_x_f_v_i32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) {
-  return vfcvt_f_x_v_f16m4(src, vl);
+vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_xu_f_v_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) {
-  return vfcvt_f_x_v_f16m8(src, vl);
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfcvt_rtz_xu_f_v_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:
ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4(src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2(src, vl); +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1(src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2(src, vl); +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4(src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8( +// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) { - return vfcvt_f_xu_v_f16m8(src, vl); +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_x_f_v_i64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_x_f_v_i64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_x_f_v_i64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_x_f_v_i64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_x_f_v_i64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_x_f_v_i64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) { + return vfcvt_x_f_v_i64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_x_f_v_i64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { - return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { - return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { - return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) { + return vfcvt_xu_f_v_u64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2_m( +// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { - return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) { + return vfcvt_rtz_xu_f_v_u64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { - return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { - return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( +// CHECK-RV64-LABEL: 
@test_vfcvt_f_x_v_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { - return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl, uint8_t ta) { + return vfcvt_f_x_v_f64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { - return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { - return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { - return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl, uint8_t ta) { + return vfcvt_f_xu_v_f64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8_m( +// CHECK-RV64-LABEL: 
@test_vfcvt_f_xu_v_f64m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl);
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl, uint8_t ta) {
+  return vfcvt_f_xu_v_f64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
index 77aff2d..7941ca9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfdiv.c
@@ -6,6 +6,114 @@ #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfdiv_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfdiv.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfdiv_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfdiv.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfdiv_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfdiv.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfdiv_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) { + return vfdiv_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -173,411 +281,547 @@ vfloat64m8_t test_vfdiv_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { return vfdiv_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t 
op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfdiv_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
- vfloat32m1_t op1, float op2, size_t vl) {
+vfloat32m1_t test_vfdiv_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
- vfloat32m2_t op1, vfloat32m2_t op2,
- size_t vl) {
+vfloat32m2_t test_vfdiv_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
- vfloat32m2_t op1, float op2, size_t vl) {
+vfloat32m2_t test_vfdiv_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
- vfloat32m4_t op1, vfloat32m4_t op2,
- size_t vl) {
+vfloat32m4_t test_vfdiv_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
- vfloat32m4_t op1, float op2, size_t vl) {
+vfloat32m4_t test_vfdiv_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
- vfloat32m8_t op1, vfloat32m8_t op2,
- size_t vl) {
+vfloat32m8_t test_vfdiv_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
- vfloat32m8_t op1, float op2, size_t vl) {
+vfloat32m8_t test_vfdiv_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vfloat64m1_t op1, vfloat64m1_t op2,
- size_t vl) {
+vfloat64m1_t test_vfdiv_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vfloat64m1_t op1, double op2, size_t vl) {
+vfloat64m1_t test_vfdiv_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vfloat64m2_t op1, vfloat64m2_t op2,
- size_t vl) {
+vfloat64m2_t test_vfdiv_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vfloat64m2_t op1, double op2, size_t vl) {
+vfloat64m2_t test_vfdiv_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat64m4_t op1, vfloat64m4_t op2,
- size_t vl) {
+vfloat64m4_t test_vfdiv_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat64m4_t op1, double op2, size_t vl) {
+vfloat64m4_t test_vfdiv_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, vfloat64m8_t op2,
- size_t vl) {
+vfloat64m8_t test_vfdiv_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfdiv_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfdiv_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfdiv_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfdiv_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfdiv_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfdiv_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfdiv_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfdiv_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfdiv_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfdiv_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfdiv_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfdiv_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfdiv_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfdiv_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfdiv_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfdiv_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfdiv_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfdiv_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfdiv_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfdiv_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfdiv_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfdiv_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfdiv_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfdiv_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfdiv_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfdiv_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfdiv_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f32m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfdiv_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f32m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfdiv_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vv_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfdiv_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfdiv_vf_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfdiv_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfdiv_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfdiv_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vv_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) {
+ return vfdiv_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfdiv_vf_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfdiv_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
+ return vfdiv_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
index 608bf6c..3a778a0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmax.c
@@ -6,6 +6,114 @@

 #include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vfmax_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+ return vfmax_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return vfmax_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+ return vfmax_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+ return vfmax_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+ return vfmax_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m8(op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,411 +281,547 @@ vfloat64m8_t test_vfmax_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfmax_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m8(op1, op2, vl);
 }

+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+ return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfmax_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+ return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfmax_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfmax_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+ return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfmax_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+ return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfmax_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+ return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfmax_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+ return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32mf2_t test_vfmax_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
- vfloat32mf2_t op1, vfloat32mf2_t op2,
- size_t vl) {
+vfloat32mf2_t test_vfmax_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
  return vfmax_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32mf2_t test_vfmax_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
- vfloat32mf2_t op1, float op2, size_t vl) {
+vfloat32mf2_t test_vfmax_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
- vfloat32m1_t op1, vfloat32m1_t op2,
- size_t vl) {
+vfloat32m1_t test_vfmax_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
- vfloat32m1_t op1, float op2, size_t vl) {
+vfloat32m1_t test_vfmax_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
- vfloat32m2_t op1, vfloat32m2_t op2,
- size_t vl) {
+vfloat32m2_t test_vfmax_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
  return vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
- vfloat32m2_t op1, float op2, size_t vl) {
+vfloat32m2_t test_vfmax_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
- vfloat32m4_t op1, vfloat32m4_t op2,
- size_t vl) {
+vfloat32m4_t test_vfmax_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
- vfloat32m4_t op1, float op2, size_t vl) {
+vfloat32m4_t test_vfmax_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
- vfloat32m8_t op1, vfloat32m8_t op2,
- size_t vl) {
+vfloat32m8_t test_vfmax_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
- vfloat32m8_t op1, float op2, size_t vl) {
+vfloat32m8_t test_vfmax_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
  return vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vfloat64m1_t op1, vfloat64m1_t op2,
- size_t vl) {
+vfloat64m1_t test_vfmax_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
  return vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vfloat64m1_t op1, double op2, size_t vl) {
+vfloat64m1_t test_vfmax_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vfloat64m2_t op1, vfloat64m2_t op2,
- size_t vl) {
+vfloat64m2_t test_vfmax_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
  return vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vfloat64m2_t op1, double op2, size_t vl) {
+vfloat64m2_t test_vfmax_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat64m4_t op1, vfloat64m4_t op2,
- size_t vl) {
+vfloat64m4_t test_vfmax_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
  return vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat64m4_t op1, double op2, size_t vl) {
+vfloat64m4_t test_vfmax_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, vfloat64m8_t op2,
- size_t vl) {
+vfloat64m8_t test_vfmax_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
  return vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfmax_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
  return vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfmax_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfmax_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfmax_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfmax_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfmax_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfmax_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfmax_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfmax_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmax_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfmax_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmax_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfmax_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfmax_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfmax_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfmax_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfmax_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfmax_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
- return vfmax_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfmax_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfmax_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfmax_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfmax_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
- return vfmax_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfmax_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfmax_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfmax_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfmax_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
- return vfmax_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmax_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfmax_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmax_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfmax_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
- return vfmax_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmax_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfmax_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmax_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmax_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmax_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmax_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmax_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) {
+ return vfmax_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfmax_vv_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfmax_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
- return vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmax_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) {
+ return vfmax_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vfmax_vf_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfmax_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfmax_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmax_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmax_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfmax_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmax_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmax_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfmax_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmax_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return 
vfmax_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmax_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfmax_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmax_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfmax_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmax_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfmax_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmax_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfmax_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmax_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfmax_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmax_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfmax_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmax_vf_f64m8_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmax_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfmax_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c index b4c7ce2..8f28fcb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmin.c @@ -6,6 +6,114 @@ #include +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmin_vv_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmin_vv_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmin_vv_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + 
return vfmin_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -177,9 +285,117 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfmin_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmin_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmin_vf_f16mf2_m 
(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmin_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmin_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmin_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmin_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -190,7 +406,7 @@ vfloat32mf2_t test_vfmin_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -200,7 +416,7 @@ vfloat32mf2_t test_vfmin_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -211,7 +427,7 @@ vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -221,7 +437,7 @@ vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -232,7 +448,7 @@ vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -242,7 +458,7 @@ vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -253,7 +469,7 @@ vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -263,7 +479,7 @@ vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -274,7 +490,7 @@ vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -284,7 +500,7 @@ vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -295,7 +511,7 @@ vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -305,7 +521,7 @@ vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -316,7 +532,7 @@ vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -326,7 +542,7 @@ vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -337,7 +553,7 @@ vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -347,7 +563,7 @@ vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -358,7 +574,7 @@ vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t 
mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -366,218 +582,273 @@ vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, return vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmin_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmin_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmin_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t 
test_vfmin_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmin_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmin_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8( +// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmin_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat16mf4_t test_vfmin_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmin_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmin_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmin_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfmin_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmin_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmin_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfmin_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmin_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfmin_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m2_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfmin_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfmin_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmin_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfmin_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmin_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmin_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfmin_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmin_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmin_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfmin_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmin_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfmin_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmin_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmin_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfmin_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c index 00059ed..e621ff4 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmul.c @@ -6,6 +6,114 @@ #include +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul_vv_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfmul_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul_vv_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul_vv_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return 
vfmul_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -177,9 +285,117 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfmul_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmul_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmul_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmul_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmul_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmul_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmul_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -190,7 +406,7 @@ vfloat32mf2_t test_vfmul_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -200,7 +416,7 @@ vfloat32mf2_t test_vfmul_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -211,7 +427,7 @@ vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -221,7 +437,7 @@ vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -232,7 +448,7 @@ vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -242,7 +458,7 @@ vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -253,7 +469,7 @@ vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: 
@test_vfmul_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -263,7 +479,7 @@ vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -274,7 +490,7 @@ vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -284,7 +500,7 @@ vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -295,7 +511,7 @@ vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -305,7 +521,7 @@ vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -316,7 +532,7 @@ 
vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -326,7 +542,7 @@ vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -337,7 +553,7 @@ vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -347,7 +563,7 @@ vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -358,7 +574,7 @@ vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -366,218 +582,273 @@ vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, return vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmul_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfmul_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmul_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfmul_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmul_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16m1_mt(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfmul_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8( +// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfmul_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfmul_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmul_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return 
vfmul_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfmul_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfmul_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfmul_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmul_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfmul_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmul_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m1_t test_vfmul_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfmul_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m2_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfmul_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16f32.f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfmul_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfmul_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfmul_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmul_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfmul_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfmul_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmul_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfmul_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfmul_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
<vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) {
+  return vfmul_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmul.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmul_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfmul_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vv_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) {
+  return vfmul_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfmul_vf_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmul.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmul_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfmul_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
index 227fdbb..34a7890 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -6,6 +6,60 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half>
[[TMP0]] +// +vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) { + return vfmv_v_f_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) { + return vfmv_v_f_f16m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f32.i64(float [[SRC:%.*]], i64 [[VL:%.*]]) @@ -86,6 +140,113 @@ vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) { vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) { return vfmv_v_f_f64m8(src, vl); } +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) { + return vfmv_f_s_f16mf4_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmv_s_f_f16mf4 (vfloat16mf4_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16mf4(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) { + return vfmv_f_s_f16mf2_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmv_s_f_f16mf2 (vfloat16mf2_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16mf2(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) { + return vfmv_f_s_f16m1_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmv_s_f_f16m1 (vfloat16m1_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16m1(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) { + return vfmv_f_s_f16m2_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmv_s_f_f16m2 (vfloat16m2_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16m2(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) { + return vfmv_f_s_f16m4_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmv_s_f_f16m4 (vfloat16m4_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16m4(dest, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) { + return vfmv_f_s_f16m8_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmv_s_f_f16m8 (vfloat16m8_t dest, _Float16 src, size_t vl) { + return vfmv_s_f_f16m8(dest, src, vl); +} // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32( // CHECK-RV64-NEXT: entry: @@ -98,7 +259,7 @@ float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) { @@ -116,7 +277,7 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) { @@ -134,7 +295,7 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) { @@ -152,7 +313,7 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) { @@ -170,7 +331,7 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) {
@@ -188,7 +349,7 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) {
@@ -206,7 +367,7 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) {
@@ -224,7 +385,7 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
@@ -236,177 +397,16 @@ vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret double [[TMP0]]
 //
-double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
+double test_vfmv_f_s_f64m8_f64 (vfloat64m8_t src) {
   return vfmv_f_s_f64m8_f64(src);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) {
-  return vfmv_s_f_f64m8(dst, src, vl);
+vfloat64m8_t test_vfmv_s_f_f64m8 (vfloat64m8_t dest, double src, size_t vl) {
+  return vfmv_s_f_f64m8(dest, src, vl);
 }
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16mf4(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16mf2(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16m1(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16m2(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16m4(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64(half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) {
-  return vfmv_v_f_f16m8(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) {
-  return vfmv_f_s_f16mf4_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf4_t test_vfmv_s_f_f16mf4 (vfloat16mf4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16mf4(dest, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) {
-  return vfmv_f_s_f16mf2_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf2_t test_vfmv_s_f_f16mf2 (vfloat16mf2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16mf2(dest, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) {
-  return vfmv_f_s_f16m1_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m1_t test_vfmv_s_f_f16m1 (vfloat16m1_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m1(dest, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) {
-  return vfmv_f_s_f16m2_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m2_t test_vfmv_s_f_f16m2 (vfloat16m2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m2(dest, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) {
-  return vfmv_f_s_f16m4_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m4_t test_vfmv_s_f_f16m4 (vfloat16m4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m4(dest, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16( [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret half [[TMP0]]
-//
-_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) {
-  return vfmv_f_s_f16m8_f16(src);
-}
-
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m8_t test_vfmv_s_f_f16m8 (vfloat16m8_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m8(dest, src, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
index 6bc0034..1469397 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c
@@ -6,6 +6,222 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m4(src, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
@@ -186,46 +402,226 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_xu_f_w_u16m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
-  return vfncvt_x_f_w_i32mf2(src, vl);
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4(vint32mf2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i32mf2(src, vl);
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2(vint32m1_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
-  return vfncvt_x_f_w_i32m1(src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1(vint32m2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i32m1(src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2(vint32m4_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_x_w_f16m4(vint32m8_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4(vuint32mf2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2(vuint32m1_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1(vuint32m2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2(vuint32m4_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4(vuint32m8_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+  return vfncvt_x_f_w_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+  return vfncvt_x_f_w_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
   return vfncvt_x_f_w_i32m2(src, vl);
@@ -470,1334 +866,1735 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4 (vfloat64m8_t src, size_t vl) {
   return vfncvt_rod_f_f_w_f32m4(src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                       vfloat32mf2_t src, size_t vl) {
+vint16mf4_t test_vfncvt_x_f_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
   return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask,
-                                           vint16mf4_t maskedoff,
-                                           vfloat32mf2_t src, size_t vl) {
+vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16mf4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
-                                       vfloat32m1_t src, size_t vl) {
+vint16mf2_t test_vfncvt_x_f_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
   return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask,
-                                           vint16mf2_t maskedoff,
-                                           vfloat32m1_t src, size_t vl) {
+vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16mf2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                     vfloat32m2_t src, size_t vl) {
+vint16m1_t test_vfncvt_x_f_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
   return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                         vfloat32m2_t src, size_t vl) {
+vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                     vfloat32m4_t src, size_t vl) {
+vint16m2_t test_vfncvt_x_f_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
   return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                         vfloat32m4_t src, size_t vl) {
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                     vfloat32m8_t src, size_t vl) {
+vint16m4_t test_vfncvt_x_f_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
   return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                         vfloat32m8_t src, size_t vl) {
+vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                         vfloat32mf2_t src, size_t vl) {
+vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
   return vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask,
-                                             vuint16mf4_t maskedoff,
-                                             vfloat32mf2_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
+vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vfncvt_xu_f_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vfncvt_xu_f_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vfncvt_xu_f_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+  return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+  return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
+  return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
+  return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
+  return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                         vfloat32m1_t src, size_t vl) {
-  return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask,
-                                             vuint16mf2_t maskedoff,
-                                             vfloat32m1_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u16mf2_m(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                       vfloat32m2_t src, size_t vl) {
-  return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask,
-                                           vuint16m1_t maskedoff,
-                                           vfloat32m2_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u16m1_m(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                       vfloat32m4_t src, size_t vl) {
-  return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                           vfloat32m4_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                       vfloat32m8_t src, size_t vl) {
-  return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                           vfloat32m8_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+  return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                       vfloat64m1_t src, size_t vl) {
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
   return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask,
-                                           vint32mf2_t maskedoff,
-                                           vfloat64m1_t src, size_t vl) {
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32mf2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                     vfloat64m2_t src, size_t vl) {
+vint32m1_t test_vfncvt_x_f_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
   return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                         vfloat64m2_t src, size_t vl) {
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                     vfloat64m4_t src, size_t vl) {
+vint32m2_t test_vfncvt_x_f_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
   return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                         vfloat64m4_t src, size_t vl) {
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                     vfloat64m8_t src, size_t vl) {
+vint32m4_t test_vfncvt_x_f_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
   return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                         vfloat64m8_t src, size_t vl) {
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
   return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                         vfloat64m1_t src, size_t vl) {
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
   return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask,
-                                             vuint32mf2_t maskedoff,
-                                             vfloat64m1_t src, size_t vl)
{ +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, - vuint32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, - vuint32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t 
test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vint64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { return vfncvt_f_x_w_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vint64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vint64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vint64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vuint64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { return vfncvt_f_xu_w_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vuint64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vuint64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vuint64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_f_f_w_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat64m1_t src, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_rod_f_f_w_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, - vfloat32m1_t maskedoff, - vfloat64m2_t src, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, - vfloat32m2_t maskedoff, - vfloat64m4_t src, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, - vfloat32m4_t maskedoff, - vfloat64m8_t src, size_t vl) { +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { - return vfncvt_x_f_w_i8mf8(src, vl); +vint8mf8_t test_vfncvt_x_f_w_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf8(src, vl); +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( 
[[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { - return vfncvt_x_f_w_i8mf4(src, vl); +vint8mf4_t test_vfncvt_x_f_w_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf4(src, vl); +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { - return vfncvt_x_f_w_i8mf2(src, vl); +vint8mf2_t test_vfncvt_x_f_w_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8mf2(src, vl); +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { - return vfncvt_x_f_w_i8m1(src, vl); +vint8m1_t test_vfncvt_x_f_w_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); 
} -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m1(src, vl); +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { - return vfncvt_x_f_w_i8m2(src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_x_f_w_i8m2(src, vl); +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { - return vfncvt_x_f_w_i8m4(src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { - return 
vfncvt_rtz_x_f_w_i8m4(src, vl); +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf8(src, vl); +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf8(src, vl); +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf4(src, vl); +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf4(src, vl); +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { - return vfncvt_xu_f_w_u8mf2(src, vl); +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8mf2(src, vl); +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { - return vfncvt_xu_f_w_u8m1(src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m1(src, vl); +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { - return vfncvt_xu_f_w_u8m2(src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8m2_mt(mask, maskedoff, src, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m2(src, vl); +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { - return vfncvt_xu_f_w_u8m4(src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { - return vfncvt_rtz_xu_f_w_u8m4(src, vl); +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) { - return vfncvt_f_x_w_f16mf4(src, vl); +vint16mf4_t test_vfncvt_x_f_w_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) { - return vfncvt_f_x_w_f16mf2(src, vl); +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) { - return vfncvt_f_x_w_f16m1(src, vl); +vint16mf2_t test_vfncvt_x_f_w_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) { - return vfncvt_f_x_w_f16m2(src, vl); +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) { - return vfncvt_f_x_w_f16m4(src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf4(src, vl); +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) { - return vfncvt_f_xu_w_f16mf2(src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) { - return vfncvt_f_xu_w_f16m1(src, vl); +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2( +// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) { - return vfncvt_f_xu_w_f16m2(src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfncvt_x_f_w_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) { - return vfncvt_f_xu_w_f16m4(src, vl); +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_x_f_w_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { - return vfncvt_f_f_w_f16mf4(src, vl); +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf4(src, vl); +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2( +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_xu_f_w_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint16m2_t test_vfncvt_xu_f_w_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_xu_f_w_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfncvt_xu_f_w_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) { + return vfncvt_rtz_xu_f_w_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { - return vfncvt_f_f_w_f16mf2(src, vl); +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl, uint8_t ta) { + return vfncvt_f_x_w_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { - return vfncvt_rod_f_f_w_f16mf2(src, vl); +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl, uint8_t ta) { + return vfncvt_f_x_w_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1( +// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m1(src, vl);
+vfloat16m1_t test_vfncvt_f_x_w_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m1(src, vl);
+vfloat16m2_t test_vfncvt_f_x_w_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m2(src, vl);
+vfloat16m4_t test_vfncvt_f_x_w_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m2(src, vl);
+vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m4(src, vl);
+vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m4(src, vl);
+vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_f_f_w_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl);
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_f_f_w_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl);
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfncvt_f_f_w_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl);
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+vint32mf2_t test_vfncvt_x_f_w_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_x_f_w_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl);
+vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_x_f_w_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+vint32m1_t test_vfncvt_x_f_w_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_x_f_w_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl);
+vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_x_f_w_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+vint32m2_t test_vfncvt_x_f_w_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_x_f_w_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl);
+vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_x_f_w_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+vint32m4_t test_vfncvt_x_f_w_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_x_f_w_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl);
+vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_x_f_w_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m2_t test_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_xu_f_w_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl);
+vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_xu_f_w_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m4_t test_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+vuint32m1_t test_vfncvt_xu_f_w_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_xu_f_w_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl);
+vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_xu_f_w_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) {
-  return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl);
+vuint32m2_t test_vfncvt_xu_f_w_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_xu_f_w_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) {
-  return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl);
+vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_xu_f_w_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) {
-  return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_xu_f_w_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_xu_f_w_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) {
-  return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl);
+vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rtz_xu_f_w_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) {
-  return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_x_w_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) {
-  return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_x_w_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) {
-  return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_x_w_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_x_w_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) {
-  return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) {
-  return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_xu_w_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_f_f_w_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_f_f_w_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_f_f_w_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_f_f_w_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl, uint8_t ta) {
+  return vfncvt_rod_f_f_w_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index d705f7a..6e6c697 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -6,6 +6,60 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
+  return vfneg_v_f16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
+  return vfneg_v_f16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
+  return vfneg_v_f16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
+  return vfneg_v_f16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
+  return vfneg_v_f16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
+  return vfneg_v_f16m8(op1, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
@@ -87,9 +141,63 @@ vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) {
   return vfneg_v_f64m8(op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfneg_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
+  return vfneg_v_f16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfneg_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
+  return vfneg_v_f16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfneg_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
+  return vfneg_v_f16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfneg_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfneg_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfneg_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfneg_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfneg_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfneg_v_f16m8_m(mask, maskedoff, op1, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
@@ -98,7 +206,7 @@ vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vf
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
@@ -107,7 +215,7 @@ vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
@@ -116,7 +224,7 @@ vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
@@ -125,7 +233,7 @@ vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat
 // CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
@@ -134,7 +242,7 @@ vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
@@ -143,7 +251,7 @@ vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
@@ -152,7 +260,7 @@ vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
@@ -161,117 +269,145 @@ vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloa
 // CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
   return vfneg_v_f64m8_m(mask, maskedoff, op1, vl);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfneg_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
-  return vfneg_v_f16mf4(op1, vl);
+vfloat16mf4_t test_vfneg_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfneg_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
-  return vfneg_v_f16mf2(op1, vl);
+vfloat16mf2_t test_vfneg_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m1(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfneg_v_f16m1 (vfloat16m1_t op1, size_t vl) {
-  return vfneg_v_f16m1(op1, vl);
+vfloat16m1_t test_vfneg_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, uint8_t ta) {
  return vfneg_v_f16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m2(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfneg_v_f16m2 (vfloat16m2_t op1, size_t vl) {
-  return vfneg_v_f16m2(op1, vl);
+vfloat16m2_t test_vfneg_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m4(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfneg_v_f16m4 (vfloat16m4_t op1, size_t vl) {
-  return vfneg_v_f16m4(op1, vl);
+vfloat16m4_t test_vfneg_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m8(
+// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfneg_v_f16m8 (vfloat16m8_t op1, size_t vl) {
-  return vfneg_v_f16m8(op1, vl);
+vfloat16m8_t test_vfneg_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vfneg_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return vfneg_v_f16mf4_m(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfneg_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vfneg_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return vfneg_v_f16mf2_m(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfneg_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfneg_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return vfneg_v_f16m1_m(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfneg_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vfneg_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return vfneg_v_f16m2_m(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfneg_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vfneg_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return vfneg_v_f16m4_m(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfneg_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, uint8_t ta) {
  return vfneg_v_f32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vfneg_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vfneg_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return vfneg_v_f16m8_m(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfneg_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vfneg_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vfneg_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vfneg_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, uint8_t ta) {
+  return vfneg_v_f64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
index 5ba1f1a..b443469 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrdiv.c
@@ -6,6 +6,60 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m8(op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
@@ -83,204 +137,277 @@ vfloat64m4_t test_vfrdiv_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfrdiv_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vfrdiv_vf_f64m8(op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfrdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfrdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfrdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfrdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfrdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vfrdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32mf2_t test_vfrdiv_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                      vfloat32mf2_t op1, float op2, size_t vl) {
+vfloat32mf2_t test_vfrdiv_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                    vfloat32m1_t op1, float op2, size_t vl) {
+vfloat32m1_t test_vfrdiv_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                    vfloat32m2_t op1, float op2, size_t vl) {
+vfloat32m2_t test_vfrdiv_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
   return vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64
1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, 
_Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrdiv_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrdiv_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrdiv_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t 
op1, double op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrdiv_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrdiv_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrdiv_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrdiv_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfrdiv_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c index c73f945..179126e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c @@ -6,6 +6,60 @@ #include +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { + return vfrec7_v_f16mf4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { + return vfrec7_v_f16mf2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) { + return vfrec7_v_f16m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) { + return vfrec7_v_f16m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) { + return vfrec7_v_f16m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) { + return vfrec7_v_f16m8(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -87,9 +141,63 @@ vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfrec7_v_f64m8(op1, vl); } +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrec7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfrec7_v_f16mf4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrec7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfrec7_v_f16mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrec7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfrec7_v_f16m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrec7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfrec7_v_f16m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrec7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfrec7_v_f16m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrec7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfrec7_v_f16m8_m(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t 
test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, @@ -99,7 +207,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, @@ -109,7 +217,7 @@ vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, @@ -119,7 +227,7 @@ vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -129,7 +237,7 @@ vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -139,7 +247,7 @@ vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -149,7 +257,7 @@ vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -159,7 +267,7 @@ vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // 
CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -169,7 +277,7 @@ vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -177,110 +285,138 @@ vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, return vfrec7_v_f64m8_m(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfrec7_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfrec7_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1(op1, vl); +vfloat16m1_t test_vfrec7_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2(op1, vl); +vfloat16m2_t test_vfrec7_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4(op1, vl); +vfloat16m4_t test_vfrec7_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8( +// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8(op1, vl); +vfloat16m8_t test_vfrec7_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrec7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrec7_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrec7_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrec7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrec7_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrec7_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vfrec7_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrec7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrec7_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrec7_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrec7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrec7_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrec7_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrec7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrec7_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrec7_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrec7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrec7_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrec7_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, uint8_t ta) { + return vfrec7_v_f64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c index dc41406..c78fd72 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c @@ -6,6 +6,60 @@ #include +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7_v_f16mf4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7_v_f16mf2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) { + return vfrsqrt7_v_f16m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) { + return vfrsqrt7_v_f16m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) { + return vfrsqrt7_v_f16m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) { + return vfrsqrt7_v_f16m8(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( 
[[OP1:%.*]], i64 [[VL:%.*]]) @@ -87,200 +141,273 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { return vfrsqrt7_v_f64m8(op1, vl); } +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfrsqrt7_v_f16mf4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfrsqrt7_v_f16mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsqrt7_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfrsqrt7_v_f16m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsqrt7_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return vfrsqrt7_v_f16m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsqrt7_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return vfrsqrt7_v_f16m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsqrt7_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return vfrsqrt7_v_f16m8_m(mask, maskedoff, op1, vl); +} + // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, size_t vl) { +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, size_t vl) { +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, size_t vl) { +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, size_t vl) { +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, size_t vl) { +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t vl) { +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, size_t vl) { +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t vl) { +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { return vfrsqrt7_v_f64m8_m(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4(op1, vl); +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2(op1, vl); +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1(op1, vl); +vfloat16m1_t test_vfrsqrt7_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2(op1, vl); +vfloat16m2_t test_vfrsqrt7_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4(op1, vl); +vfloat16m4_t test_vfrsqrt7_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8(op1, vl); +vfloat16m8_t test_vfrsqrt7_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { - return vfrsqrt7_v_f16mf4_m(mask, maskedoff, op1, vl); +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrsqrt7.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { - return vfrsqrt7_v_f16mf2_m(mask, maskedoff, op1, vl); +vfloat32m1_t test_vfrsqrt7_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsqrt7_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { - return vfrsqrt7_v_f16m1_m(mask, maskedoff, op1, vl); +vfloat32m2_t test_vfrsqrt7_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsqrt7_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { - return vfrsqrt7_v_f16m2_m(mask, maskedoff, op1, vl); +vfloat32m4_t test_vfrsqrt7_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsqrt7_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { - return vfrsqrt7_v_f16m4_m(mask, maskedoff, op1, vl); +vfloat32m8_t test_vfrsqrt7_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsqrt7_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { - return vfrsqrt7_v_f16m8_m(mask, maskedoff, op1, vl); +vfloat64m1_t test_vfrsqrt7_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsqrt7_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, uint8_t ta) { + return vfrsqrt7_v_f64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c index 4f55613..d052732 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsub.c @@ -6,6 +6,60 @@ #include +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) @@ -83,204 +137,277 @@ vfloat64m4_t test_vfrsub_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { return vfrsub_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfrsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfrsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfrsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfrsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfrsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfrsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfrsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfrsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, 
float op2, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfrsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfrsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfrsub_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } 
+ +// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsub_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsub_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfrsub_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsub_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfrsub_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c index 13ab571..d8a3e8b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsgnj.c @@ -6,6 +6,114 @@ #include +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnj_vv_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj_vv_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, 
vfloat16m1_t op2, size_t vl) { + return vfsgnj_vv_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -177,6 +285,114 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfsgnj_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vv_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn_vv_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat16mf4_t test_vfsgnjn_vf_f16mf4(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vv_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn_vv_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjn_vf_f16mf2(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vv_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn_vv_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjn_vf_f16m1(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vv_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vf_f16m2(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vv_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vf_f16m4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vv_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vf_f16m8(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -348,60 +564,156 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { return vfsgnjn_vf_f64m8(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfsgnjx_vv_f32mf2(op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf4(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vf_f32mf2(vfloat32mf2_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx_vv_f16mf2(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vfsgnjx_vv_f32m1(op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf2(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1(vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjx_vf_f32m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx_vv_f16m1(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfsgnjx_vv_f32m2(op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m1(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx_vv_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx_vv_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx_vv_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjx_vv_f32mf2 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjx_vv_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2 (vfloat32mf2_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjx_vv_f32m1 (vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjx_vv_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjx_vf_f32m1 (vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjx_vf_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjx_vv_f32m2 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjx_vv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjx_vf_f32m2 (vfloat32m2_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m2(op1, op2, vl); } @@ -410,8 +722,7 @@ vfloat32m2_t test_vfsgnjx_vf_f32m2(vfloat32m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnjx_vv_f32m4(op1, op2, vl); } @@ -420,7 +731,7 @@ vfloat32m4_t test_vfsgnjx_vv_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4 (vfloat32m4_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m4(op1, op2, vl); } @@ -429,8 +740,7 @@ vfloat32m4_t test_vfsgnjx_vf_f32m4(vfloat32m4_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8 (vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnjx_vv_f32m8(op1, op2, vl); } @@ -439,7 +749,7 @@ vfloat32m8_t test_vfsgnjx_vv_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8 (vfloat32m8_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m8(op1, op2, vl); } @@ -448,8 +758,7 @@ vfloat32m8_t 
test_vfsgnjx_vf_f32m8(vfloat32m8_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1 (vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnjx_vv_f64m1(op1, op2, vl); } @@ -458,7 +767,7 @@ vfloat64m1_t test_vfsgnjx_vv_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1 (vfloat64m1_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m1(op1, op2, vl); } @@ -467,8 +776,7 @@ vfloat64m1_t test_vfsgnjx_vf_f64m1(vfloat64m1_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2 (vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnjx_vv_f64m2(op1, op2, vl); } @@ -477,7 +785,7 @@ vfloat64m2_t test_vfsgnjx_vv_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2 (vfloat64m2_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m2(op1, op2, vl); } @@ -486,8 +794,7 @@ vfloat64m2_t test_vfsgnjx_vf_f64m2(vfloat64m2_t op1, double op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4 (vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnjx_vv_f64m4(op1, op2, vl); } @@ -496,7 +803,7 @@ vfloat64m4_t test_vfsgnjx_vv_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vf_f64m4(vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfsgnjx_vf_f64m4 (vfloat64m4_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m4(op1, op2, vl); } @@ -515,1223 +822,1627 @@ vfloat64m8_t test_vfsgnjx_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfsgnjx_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnj_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnj_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnj_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnj_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnj_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnj_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnj_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnj_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsgnj_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnj_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vfloat32mf2_t test_vfsgnj_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfsgnj_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfsgnj_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfsgnj_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnj_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnj_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnj_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnj_vf_f64m1_m (vbool64_t mask, vfloat64m1_t 
maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnj_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnj_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfsgnj_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfsgnj_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { return vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vfsgnjn_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjn_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, - size_t vl) { - return vfsgnjn_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return 
vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjn_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - 
vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl); -} +vfloat16m2_t test_vfsgnjn_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjn_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjn_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjn_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vfsgnjn_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float 
op2, size_t vl) { + return vfsgnjn_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjn_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsgnjn_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjn_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsgnjn_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsgnjn_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +} // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { return vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { return vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, 
vfloat64m8_t op1, double op2, size_t vl) { return vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfsgnjx_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfsgnjx_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsgnjx_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsgnjx_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsgnjx_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsgnjx_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfsgnjx_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m(vbool64_t mask, 
vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, float op2, - size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { return vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { return vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { return vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { return vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { return vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { return vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { 
+vfloat64m4_t test_vfsgnjx_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                     vfloat64m8_t op1, vfloat64m8_t op2,
-                                     size_t vl) {
+vfloat64m8_t test_vfsgnjx_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                     vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfsgnjx_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
   return vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf4_t test_vfsgnj_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vfsgnj_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf4_t test_vfsgnj_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsgnj_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf2_t test_vfsgnj_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vfsgnj_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf2_t test_vfsgnj_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsgnj_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfsgnj_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vfsgnj_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfsgnj_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsgnj_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m2_t test_vfsgnj_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vfsgnj_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m2_t test_vfsgnj_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsgnj_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m4_t test_vfsgnj_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfsgnj_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m4_t test_vfsgnj_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsgnj_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m8_t test_vfsgnj_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vfsgnj_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m8_t test_vfsgnj_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfsgnj_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsgnj_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf4_t test_vfsgnjn_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vfsgnjn_vv_f16mf4(op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf4_t test_vfsgnjn_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfsgnjn_vf_f16mf4(op1, op2, vl);
+vfloat32mf2_t test_vfsgnj_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf2_t test_vfsgnjn_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vfsgnjn_vv_f16mf2(op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16mf2_t test_vfsgnjn_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfsgnjn_vf_f16mf2(op1, op2, vl);
+vfloat32m1_t test_vfsgnj_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfsgnjn_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vfsgnjn_vv_f16m1(op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m1_t test_vfsgnjn_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfsgnjn_vf_f16m1(op1, op2, vl);
+vfloat32m2_t test_vfsgnj_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m2_t test_vfsgnjn_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vfsgnjn_vv_f16m2(op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) {
+  return vfsgnj_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2(
+// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vfloat16m2_t test_vfsgnjn_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfsgnjn_vf_f16m2(op1, op2, vl);
+vfloat32m4_t test_vfsgnj_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff,
vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4( +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfsgnj_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4(op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8( +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8(op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfsgnj_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8(op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnj_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfsgnj_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4( +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnj_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnj_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfsgnj_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnj_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnj_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfsgnj_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnj_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnj_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnj_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mt 
(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4(op1, op2, vl); +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2(op1, op2, vl); +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1(op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2(op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4 (vfloat16m4_t 
op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4(op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8(op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnj_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnj_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); 
+vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnj_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnj_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat16m1_t test_vfsgnj_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnj_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjn_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjn_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjn_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsgnjn_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjn_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfsgnjn_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsgnjn_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnjn_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjn_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat16mf4_t 
test_vfsgnjx_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjn_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - 
return vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, 
vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjn_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfsgnjx_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf4_m(mask, maskedoff, op1, op2, vl); +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfsgnjx_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f32m8_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) { + return vfsgnjx_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjx_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) { + return vfsgnjx_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsgnjx_vf_f64m2_mt (vbool32_t mask, 
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsgnjx.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsgnjx_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfsgnjx_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) {
+  return vfsgnjx_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsgnjx.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsgnjx_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfsgnjx_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vv_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) {
+  return vfsgnjx_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsgnjx_vf_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsgnjx.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsgnjx_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfsgnjx_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
index ca951c0..a62016b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1down.c
@@ -6,6 +6,60 @@
 #include <riscv_vector.h>

+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16.i64(<vscale x 1 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16.i64(<vscale x 2 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) {
+  return vfslide1down_vf_f16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16.i64(<vscale x 4 x half> [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t
test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m1(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m2(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1down_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m4(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m8(src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) @@ -96,9 +150,63 @@ vfloat64m8_t test_vfslide1down_vf_f64m8(vfloat64m8_t src, double value, return vfslide1down_vf_f64m8(src, value, vl); } +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1down_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16mf4_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1down_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16mf2_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1down_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m1_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1down_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, 
_Float16 value, size_t vl) { + return vfslide1down_vf_f16m2_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1down_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m4_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1down_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1down_vf_f16m8_m(mask, maskedoff, src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, @@ -110,7 +218,7 @@ vfloat32mf2_t test_vfslide1down_vf_f32mf2_m(vbool64_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, @@ -122,7 +230,7 @@ vfloat32m1_t test_vfslide1down_vf_f32m1_m(vbool32_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, @@ -134,7 +242,7 @@ vfloat32m2_t test_vfslide1down_vf_f32m2_m(vbool16_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, @@ -145,7 +253,7 @@ vfloat32m4_t test_vfslide1down_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, // 
CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, @@ -156,7 +264,7 @@ vfloat32m8_t test_vfslide1down_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, @@ -168,7 +276,7 @@ vfloat64m1_t test_vfslide1down_vf_f64m1_m(vbool64_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, @@ -180,7 +288,7 @@ vfloat64m2_t test_vfslide1down_vf_f64m2_m(vbool32_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, @@ -192,7 +300,7 @@ vfloat64m4_t test_vfslide1down_vf_f64m4_m(vbool16_t mask, // CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -201,110 +309,138 @@ vfloat64m8_t test_vfslide1down_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, return vfslide1down_vf_f64m8_m(mask, maskedoff, src, value, vl); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1down_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4(src, value, vl); +vfloat16mf4_t test_vfslide1down_vf_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1down_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2(src, value, vl); +vfloat16mf2_t test_vfslide1down_vf_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1down_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1(src, value, vl); +vfloat16m1_t test_vfslide1down_vf_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1down_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2(src, value, vl); +vfloat16m2_t test_vfslide1down_vf_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1down_vf_f16m4 
(vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4(src, value, vl); +vfloat16m4_t test_vfslide1down_vf_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1down_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8(src, value, vl); +vfloat16m8_t test_vfslide1down_vf_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1down_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf4_m(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1down_vf_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1down_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16mf2_m(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1down_vf_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfslide1down.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1down_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m1_m(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1down_vf_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f32m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1down_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m2_m(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1down_vf_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1down_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m4_m(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1down_vf_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1down_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1down_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1down_vf_f16m8_m(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1down_vf_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// 
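// [Editorial sketch, not part of the generated test file] An unmasked
// builtin carries no maskedoff operand and is always tail-agnostic, so a
// caller who needs the tail kept can route through the masked _mt form with
// an all-true mask, as the slide tests around here exercise. vmset_m_b32()
// and VE_TAIL_UNDISTURBED are assumed to be provided by riscv_vector.h.
#include <riscv_vector.h>

vfloat32m1_t slide1down_keep_tail(vfloat32m1_t dest, vfloat32m1_t src,
                                  float value, size_t vl) {
  vbool32_t all = vmset_m_b32(vl);  // every body element active
  // With all elements active, maskedoff (dest) contributes only the tail
  // (indices >= vl), which the tail-undisturbed policy preserves.
  return vfslide1down_vf_f32m1_mt(all, dest, src, value, vl,
                                  VE_TAIL_UNDISTURBED);
}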
CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfslide1down_vf_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfslide1down_vf_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1down_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfslide1down_vf_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl, uint8_t ta) { + return vfslide1down_vf_f64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c index 1fee5aa..1fadbde 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfslide1up.c @@ -2,10 +2,65 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ // RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \ +// RUN: | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf4(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf2(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m1(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m2(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m4(src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m8(src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f32.f32.i64( [[SRC:%.*]], float [[VALUE:%.*]], i64 [[VL:%.*]]) @@ -91,215 +146,277 @@ vfloat64m4_t test_vfslide1up_vf_f64m4(vfloat64m4_t src, double value, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f64.f64.i64( [[SRC:%.*]], double [[VALUE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8(vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8 (vfloat64m8_t src, double value, size_t vl) { return vfslide1up_vf_f64m8(src, value, vl); } +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfslide1up_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf4_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfslide1up_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16mf2_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfslide1up_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m1_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfslide1up_vf_f16m2_m 
(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m2_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfslide1up_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m4_m(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfslide1up_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { + return vfslide1up_vf_f16m8_m(mask, maskedoff, src, value, vl); +} + // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfslide1up_vf_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat32mf2_t src, float value, - size_t vl) { +vfloat32mf2_t test_vfslide1up_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl) { return vfslide1up_vf_f32mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfslide1up_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vfloat32m1_t src, float value, - size_t vl) { +vfloat32m1_t test_vfslide1up_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl) { return vfslide1up_vf_f32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfslide1up_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vfloat32m2_t src, float value, - size_t vl) { +vfloat32m2_t test_vfslide1up_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl) { return vfslide1up_vf_f32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfslide1up_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t src, float value, - size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl) { return vfslide1up_vf_f32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfslide1up_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t src, float value, - size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl) { return vfslide1up_vf_f32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfslide1up_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t src, double value, - size_t vl) { +vfloat64m1_t test_vfslide1up_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl) { return vfslide1up_vf_f64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfslide1up_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t src, double value, - size_t vl) { +vfloat64m2_t test_vfslide1up_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl) { return vfslide1up_vf_f64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t 
test_vfslide1up_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t src, double value, - size_t vl) { +vfloat64m4_t test_vfslide1up_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl) { return vfslide1up_vf_f64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfslide1up_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t src, double value, - size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl) { return vfslide1up_vf_f64m8_m(mask, maskedoff, src, value, vl); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1up_vf_f16mf4 (vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4(src, value, vl); +vfloat16mf4_t test_vfslide1up_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1up_vf_f16mf2 (vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2(src, value, vl); +vfloat16mf2_t test_vfslide1up_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1up_vf_f16m1 (vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1(src, value, vl); +vfloat16m1_t test_vfslide1up_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16m1_mt(mask, 
maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1up_vf_f16m2 (vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2(src, value, vl); +vfloat16m2_t test_vfslide1up_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1up_vf_f16m4 (vfloat16m4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m4(src, value, vl); +vfloat16m4_t test_vfslide1up_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32f16.f16.i64( [[SRC:%.*]], half [[VALUE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1up_vf_f16m8 (vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8(src, value, vl); +vfloat16m8_t test_vfslide1up_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16mf4_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfslide1up_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf4_m(mask, maskedoff, src, value, vl); +vfloat32mf2_t test_vfslide1up_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, float value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vfslide1up_vf_f16mf2_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfslide1up_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16mf2_m(mask, maskedoff, src, value, vl); +vfloat32m1_t test_vfslide1up_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, float value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m1_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfslide1up_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m1_m(mask, maskedoff, src, value, vl); +vfloat32m2_t test_vfslide1up_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, float value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f32m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m2_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfslide1up_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m2_m(mask, maskedoff, src, value, vl); +vfloat32m4_t test_vfslide1up_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, float value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m4_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], float [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfslide1up_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, _Float16 value, size_t vl) { - 
return vfslide1up_vf_f16m4_m(mask, maskedoff, src, value, vl); +vfloat32m8_t test_vfslide1up_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, float value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfslide1up_vf_f16m8_m( +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], half [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfslide1up_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, _Float16 value, size_t vl) { - return vfslide1up_vf_f16m8_m(mask, maskedoff, src, value, vl); +vfloat64m1_t test_vfslide1up_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, double value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfslide1up_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, double value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfslide1up_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, double value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfslide1up_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], double [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfslide1up_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, double value, size_t vl, uint8_t ta) { + return vfslide1up_vf_f64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c index 345378c..3e3ef96 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c @@ -6,6 +6,60 @@ #include +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) { + return vfsqrt_v_f16mf4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.nxv2f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) { + return vfsqrt_v_f16mf2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) { + return vfsqrt_v_f16m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) { + return vfsqrt_v_f16m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) { + return vfsqrt_v_f16m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv32f16.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) { + return vfsqrt_v_f16m8(op1, vl); +} + // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) @@ -83,204 +137,277 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8 (vfloat64m8_t op1, size_t vl) { return vfsqrt_v_f64m8(op1, vl); } +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfsqrt_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) { + return vfsqrt_v_f16mf4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfsqrt_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) { + return vfsqrt_v_f16mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfsqrt_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsqrt_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
+  return vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsqrt_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
+  return vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsqrt_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
+  return vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                     vfloat32mf2_t op1, size_t vl) {
+vfloat32mf2_t test_vfsqrt_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) {
   return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                   vfloat32m1_t op1, size_t vl) {
+vfloat32m1_t test_vfsqrt_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) {
   return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                   vfloat32m2_t op1, size_t vl) {
+vfloat32m2_t test_vfsqrt_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) {
   return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-                                   vfloat32m4_t op1, size_t vl) {
+vfloat32m4_t test_vfsqrt_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) {
   return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-                                   vfloat32m8_t op1, size_t vl) {
+vfloat32m8_t test_vfsqrt_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) {
   return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-                                   vfloat64m1_t op1, size_t vl) {
+vfloat64m1_t test_vfsqrt_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) {
   return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-                                   vfloat64m2_t op1, size_t vl) {
+vfloat64m2_t test_vfsqrt_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) {
   return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-                                   vfloat64m4_t op1, size_t vl) {
+vfloat64m4_t test_vfsqrt_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) {
   return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                   vfloat64m8_t op1, size_t vl) {
+vfloat64m8_t test_vfsqrt_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
   return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
-  return vfsqrt_v_f16mf4(op1, vl);
+vfloat16mf4_t test_vfsqrt_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
-  return vfsqrt_v_f16mf2(op1, vl);
+vfloat16mf2_t test_vfsqrt_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) {
-  return vfsqrt_v_f16m1(op1, vl);
+vfloat16m1_t test_vfsqrt_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) {
-  return vfsqrt_v_f16m2(op1, vl);
+vfloat16m2_t test_vfsqrt_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) {
-  return vfsqrt_v_f16m4(op1, vl);
+vfloat16m4_t test_vfsqrt_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) {
-  return vfsqrt_v_f16m8(op1, vl);
+vfloat16m8_t test_vfsqrt_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat16mf4_t test_vfsqrt_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
-  return vfsqrt_v_f16mf4_m(mask, maskedoff, op1, vl);
+vfloat32mf2_t test_vfsqrt_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat16mf2_t test_vfsqrt_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
-  return vfsqrt_v_f16mf2_m(mask, maskedoff, op1, vl);
+vfloat32m1_t test_vfsqrt_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat16m1_t test_vfsqrt_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
-  return vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl);
+vfloat32m2_t test_vfsqrt_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat16m2_t test_vfsqrt_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
-  return vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl);
+vfloat32m4_t test_vfsqrt_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat16m4_t test_vfsqrt_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
-  return vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl);
+vfloat32m8_t test_vfsqrt_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat16m8_t test_vfsqrt_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
-  return vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl);
+vfloat64m1_t test_vfsqrt_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
 }
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsqrt_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsqrt_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfsqrt_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl, uint8_t ta) {
+  return vfsqrt_v_f64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
index 7e3831f..81d7150 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsub.c
@@ -6,6 +6,114 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsub_vv_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsub_vv_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsub_vv_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsub_vv_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsub_vv_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsub_vv_f16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m8(op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -173,411 +281,547 @@ vfloat64m8_t test_vfsub_vv_f64m8(vfloat64m8_t op1, vfloat64m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.nxv8f64.f64.i64(<vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfsub_vf_f64m8 (vfloat64m8_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m8(op1, op2, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfsub_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfsub_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
-                                     size_t vl) {
+vfloat32mf2_t test_vfsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
   return vfsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                     vfloat32mf2_t op1, float op2, size_t vl) {
+vfloat32mf2_t test_vfsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
   return vfsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                   vfloat32m1_t op1, vfloat32m1_t op2,
-                                   size_t vl) {
+vfloat32m1_t test_vfsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
   return vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                   vfloat32m1_t op1, float op2, size_t vl) {
+vfloat32m1_t test_vfsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                   vfloat32m2_t op1, vfloat32m2_t op2,
-                                   size_t vl) {
+vfloat32m2_t test_vfsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
   return vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                   vfloat32m2_t op1, float op2, size_t vl) {
+vfloat32m2_t test_vfsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-                                   vfloat32m4_t op1, vfloat32m4_t op2,
-                                   size_t vl) {
+vfloat32m4_t test_vfsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
   return vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-                                   vfloat32m4_t op1, float op2, size_t vl) {
+vfloat32m4_t test_vfsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-                                   vfloat32m8_t op1, vfloat32m8_t op2,
-                                   size_t vl) {
+vfloat32m8_t test_vfsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
  return vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-                                   vfloat32m8_t op1, float op2, size_t vl) {
+vfloat32m8_t test_vfsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
   return vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-                                   vfloat64m1_t op1, vfloat64m1_t op2,
-                                   size_t vl) {
+vfloat64m1_t test_vfsub_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
   return vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-                                   vfloat64m1_t op1, double op2, size_t vl) {
+vfloat64m1_t test_vfsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-                                   vfloat64m2_t op1, vfloat64m2_t op2,
-                                   size_t vl) {
+vfloat64m2_t test_vfsub_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
   return vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
-                                   vfloat64m2_t op1, double op2, size_t vl) {
+vfloat64m2_t test_vfsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-                                   vfloat64m4_t op1, vfloat64m4_t op2,
-                                   size_t vl) {
+vfloat64m4_t test_vfsub_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
   return vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
-                                   vfloat64m4_t op1, double op2, size_t vl) {
+vfloat64m4_t test_vfsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                   vfloat64m8_t op1, vfloat64m8_t op2,
-                                   size_t vl) {
+vfloat64m8_t test_vfsub_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
   return vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
-                                   vfloat64m8_t op1, double op2, size_t vl) {
+vfloat64m8_t test_vfsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
   return vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfsub_vv_f16mf4 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vfsub_vv_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsub_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) {
+  return vfsub_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfsub_vf_f16mf4 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16mf4(op1, op2, vl);
+vfloat16mf4_t test_vfsub_vf_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsub_vf_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfsub_vv_f16mf2 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vfsub_vv_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsub_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) {
+  return vfsub_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfsub_vf_f16mf2 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16mf2(op1, op2, vl);
+vfloat16mf2_t test_vfsub_vf_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfsub_vv_f16m1 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vfsub_vv_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsub_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) {
+  return vfsub_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfsub_vf_f16m1 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m1(op1, op2, vl);
+vfloat16m1_t test_vfsub_vf_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsub_vf_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfsub_vv_f16m2 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vfsub_vv_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsub_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfsub_vf_f16m2 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m2(op1, op2, vl);
+vfloat16m2_t test_vfsub_vf_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfsub_vv_f16m4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfsub_vv_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsub_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfsub_vf_f16m4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m4(op1, op2, vl);
+vfloat16m4_t test_vfsub_vf_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) {
+  return vfsub_vf_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfsub_vv_f16m8 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vfsub_vv_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsub_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfsub_vf_f16m8 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m8(op1, op2, vl);
+vfloat16m8_t test_vfsub_vf_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat16mf4_t test_vfsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vfsub_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat16mf4_t test_vfsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfsub_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat16mf2_t test_vfsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vfsub_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat16mf2_t test_vfsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfsub_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat16m1_t test_vfsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat16m1_t test_vfsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfsub_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat16m2_t test_vfsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat16m2_t test_vfsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfsub_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f32m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat16m4_t test_vfsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f32m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat16m4_t test_vfsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfsub_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl, uint8_t ta) {
  return vfsub_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vv_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat16m8_t test_vfsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl, uint8_t ta) {
  return vfsub_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfsub_vf_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat16m8_t test_vfsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfsub_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfsub_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl, uint8_t ta) {
+  return vfsub_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfsub_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl, uint8_t ta) {
+  return vfsub_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
+
+// CHECK-RV64-LABEL: @test_vfsub_vv_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfsub_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl, uint8_t ta) {
+  return vfsub_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfsub_vf_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsub_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl, uint8_t ta) { + return vfsub_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsub_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl, uint8_t ta) { + return vfsub_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vfsub_vf_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsub_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl, uint8_t ta) { + return vfsub_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c index b2633d5..0722521 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwadd.c @@ -6,6 +6,186 @@ #include +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_vv_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_f32m1 
(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -78,7 +258,7 @@ vfloat64m2_t test_vfwadd_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2 (vfloat64m2_t op1, float op2, size_t vl) { return vfwadd_wf_f64m2(op1, op2, vl); } @@ -87,601 +267,718 @@ vfloat64m2_t test_vfwadd_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwadd_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_vf_f64m4 (vfloat32m2_t 
op1, float op2, size_t vl) { + return vfwadd_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wv_f64m4 (vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwadd_wv_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwadd_wf_f64m4 (vfloat64m4_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vv_f64m8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_vf_f64m8 (vfloat32m4_t op1, float op2, size_t vl) { + return vfwadd_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wv_f64m8 (vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwadd_wv_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwadd_wf_f64m8 (vfloat64m8_t op1, float op2, size_t vl) { + return vfwadd_wf_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwadd_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwadd_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwadd_wv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwadd_wv_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4(vfloat64m4_t op1, float op2, 
size_t vl) { - return vfwadd_wf_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwadd_vv_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { - return vfwadd_vf_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwadd_wv_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { - return vfwadd_wf_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfwadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfwadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { return vfwadd_wv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, float op2, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { return vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_m 
(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { return vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { return vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { return vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, float op2, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { return vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { return vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { return vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, 
size_t vl) { - return vfwadd_wv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwadd_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwadd_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwadd_wv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1( +// 
CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f32m4_mt(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_wv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m8(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwadd_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vfwadd_vf_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwadd_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwadd_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwadd_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwadd_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
// -vfloat32m1_t test_vfwadd_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwadd_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfwadd_wv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_wf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwadd_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfwadd_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwadd_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwadd_vf_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfwadd_vf_f64m8_mt(mask, maskedoff, 
op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfwadd_vf_f32m8_m(
+// CHECK-RV64-LABEL: @test_vfwadd_wv_f64m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat32m8_t test_vfwadd_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) {
+  return vfwadd_wv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfwadd_wv_f32m8_m(
+// CHECK-RV64-LABEL: @test_vfwadd_wf_f64m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat32m8_t test_vfwadd_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwadd_wf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfwadd_wf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfwadd_wf_f32m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfwadd_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
-  return vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
index 76f1401..842eb36 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -6,6 +6,294 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
+  return vfwcvt_f_x_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
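// --- Editorial aside (not part of the patch): the unmasked conversions being
// added in this hunk compose naturally; a sketch, under this file's
// <riscv_vector.h> environment, widening signed i8 data to f32 in two steps.
// Both builtins are added by this patch (vfwcvt_f_f_v_f32mf2 in a later hunk
// of this file).
vfloat32mf2_t widen_i8_to_f32(vint8mf8_t src, size_t vl) {
  vfloat16mf4_t f16 = vfwcvt_f_x_v_f16mf4(src, vl); // signed i8 -> f16
  return vfwcvt_f_f_v_f32mf2(f16, vl);              // f16 -> f32
}
// --- End editorial aside.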
+// +vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) { + return vfwcvt_f_x_v_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) { + return vfwcvt_f_x_v_f16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { + return vfwcvt_x_f_v_i32mf2(src, 
vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { + return vfwcvt_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { + return vfwcvt_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -96,6 +384,51 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { return vfwcvt_f_xu_v_f32m8(src, vl); } +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) { + return vfwcvt_f_f_v_f32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) { + return vfwcvt_f_f_v_f32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) { + return vfwcvt_f_f_v_f32m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) @@ -200,1201 +533,1501 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m2(src, vl); +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2 (vfloat32m1_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_xu_f_v_u64m4 (vfloat32m2_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4 (vfloat32m2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_xu_f_v_u64m8 (vfloat32m4_t src, size_t vl) { + return vfwcvt_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8 (vfloat32m4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_x_v_f64m1 (vint32mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_x_v_f64m2 (vint32m1_t src, size_t vl) { + return vfwcvt_f_x_v_f64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_x_v_f64m4 (vint32m2_t src, size_t vl) { + return vfwcvt_f_x_v_f64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_x_v_f64m8 (vint32m4_t src, size_t vl) { + return vfwcvt_f_x_v_f64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1 (vuint32mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2 (vuint32m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4 (vuint32m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8 (vuint32m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfwcvt_f_f_v_f64m1 (vfloat32mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat64m2_t test_vfwcvt_f_f_v_f64m2 (vfloat32m1_t src, size_t vl) { + return vfwcvt_f_f_v_f64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwcvt_f_f_v_f64m4 (vfloat32m2_t src, size_t vl) { + return vfwcvt_f_f_v_f64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwcvt_f_f_v_f64m8 (vfloat32m4_t src, size_t vl) { + return vfwcvt_f_f_v_f64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { + return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { + return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { + return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { + return vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { + return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_f_xu_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { + return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t 
vl) { + return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m4(src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m4(src, vl); +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u64m8(src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] 
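// --- Editorial aside (not part of the patch): in the hunks below the masked
// _m builtins keep their C signatures; only the expected IR gains the trailing
// `i64 1` policy operand (tail agnostic, matching the VE_TAIL_AGNOSTIC uses
// earlier in this patch). A representative call, assumed for illustration
// rather than taken from the generated tests:
vint32m1_t convert_active_lanes(vbool32_t mask, vint32m1_t maskedoff,
                                vfloat16mf2_t src, size_t vl) {
  // Masked-off elements are taken from maskedoff; the tail is agnostic.
  return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
}
// --- End editorial aside.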
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u64m8(src, vl); +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( +// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m1(src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { - return vfwcvt_f_x_v_f64m2(src, vl); +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { - return vfwcvt_f_x_v_f64m4(src, vl); +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { - return vfwcvt_f_x_v_f64m8(src, vl); +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfwcvt_f_xu_v_f64m1( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m1(src, vl); +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m2(src, vl); +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m4(src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { - return vfwcvt_f_xu_v_f64m8(src, vl); +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t 
test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m1(src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { - return vfwcvt_f_f_v_f64m2(src, vl); +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { - return vfwcvt_f_f_v_f64m4(src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { - return vfwcvt_f_f_v_f64m8(src, vl); +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vint16mf4_t src, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { return vfwcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vint16mf2_t src, size_t vl) { +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { return vfwcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vint16m1_t src, size_t vl) { +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { return vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vint16m2_t src, size_t vl) { +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { return vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vint16m4_t src, size_t vl) { +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { return vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vuint16mf4_t src, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { return vfwcvt_f_xu_v_f32mf2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - vuint16mf2_t src, size_t vl) { +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - vuint16m1_t src, size_t vl) { +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { return vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vuint16m2_t src, size_t vl) { +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { return vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vuint16m4_t src, size_t vl) { +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { return vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); } +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl); +} + // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vfloat32mf2_t src, size_t vl) { +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vfloat32m1_t src, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vfloat32m1_t src, size_t vl) {
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfwcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
- vfloat32m2_t src, size_t vl) {
+vint64m4_t test_vfwcvt_x_f_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
- vfloat32m2_t src, size_t vl) {
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfwcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
- vfloat32m4_t src, size_t vl) {
+vint64m8_t test_vfwcvt_x_f_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
- vfloat32m4_t src, size_t vl) {
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfwcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vfloat32mf2_t src, size_t vl) {
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfwcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask,
- vuint64m1_t maskedoff,
- vfloat32mf2_t src, size_t vl) {
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfwcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vfloat32m1_t src, size_t vl) {
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask,
- vuint64m2_t maskedoff,
- vfloat32m1_t src, size_t vl) {
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfwcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vfloat32m2_t src, size_t vl) {
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask,
- vuint64m4_t maskedoff,
- vfloat32m2_t src, size_t vl) {
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfwcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vfloat32m4_t src, size_t vl) {
+vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vfloat32m4_t src, size_t vl) {
+vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfwcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vint32mf2_t src, size_t vl) {
+vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return vfwcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vint32m1_t src, size_t vl) {
+vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vint32m2_t src, size_t vl) {
+vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vint32m4_t src, size_t vl) {
+vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vuint32mf2_t src, size_t vl) {
+vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return vfwcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vuint32m1_t src, size_t vl) {
+vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vuint32m2_t src, size_t vl) {
+vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vuint32m4_t src, size_t vl) {
+vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
- vfloat32mf2_t src, size_t vl) {
+vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return vfwcvt_f_f_v_f64m1_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff,
- vfloat32m1_t src, size_t vl) {
+vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff,
- vfloat32m2_t src, size_t vl) {
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
- vfloat32m4_t src, size_t vl) {
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
- return vfwcvt_f_x_v_f16mf4(src, vl);
+vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
- return vfwcvt_f_x_v_f16mf2(src, vl);
+vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m1(src, vl);
+vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m2(src, vl);
+vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m4(src, vl);
+vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m8(src, vl);
+vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16mf4(src, vl);
+vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16mf2(src, vl);
+vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m1(src, vl);
+vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m2(src, vl);
+vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m4(src, vl);
+vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m8(src, vl);
+vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_f_v_i32mf2(src, vl);
+vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32mf2(src, vl);
+vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m1(src, vl);
+vint32m1_t test_vfwcvt_x_f_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m1(src, vl);
+vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m2(src, vl);
+vint32m2_t test_vfwcvt_x_f_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m2(src, vl);
+vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m4(src, vl);
+vint32m4_t test_vfwcvt_x_f_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m4(src, vl);
+vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m8(src, vl);
+//
+vint32m8_t test_vfwcvt_x_f_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m8(src, vl);
+vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32mf2(src, vl);
+vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32mf2(src, vl);
+vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32m1(src, vl);
+vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32m1(src, vl);
+vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32m2(src, vl);
+vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32m2(src, vl);
+vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32m4(src, vl);
+vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32m4(src, vl);
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32m8(src, vl);
+vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32m8(src, vl);
+vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) {
- return vfwcvt_f_f_v_f32mf2(src, vl);
+vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) {
- return vfwcvt_f_f_v_f32m1(src, vl);
+vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) {
- return vfwcvt_f_f_v_f32m2(src, vl);
+vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) {
- return vfwcvt_f_f_v_f32m4(src, vl);
+vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) {
- return vfwcvt_f_f_v_f32m8(src, vl);
+vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_x_v_f32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
- return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
- return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_xu_v_f32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
- return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_f_v_f32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl);
+vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_f_v_f32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl);
+vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_f_v_f32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl);
+vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_f_v_f32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl);
+vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_f_f_v_f32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_x_f_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
- return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl);
+vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_x_f_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl);
+vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_x_f_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl);
+vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_x_f_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_x_f_v_i64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl);
+vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_x_f_v_i64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl);
+vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
- return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl);
+vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_rtz_xu_f_v_u64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+ return vfwcvt_xu_f_v_u64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
- return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl);
+vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+ return
vfwcvt_rtz_xu_f_v_u64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfwcvt_xu_f_v_u64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) { + return vfwcvt_rtz_xu_f_v_u64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_x_v_f64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mt (vbool32_t mask, vfloat64m2_t 
maskedoff, vint32m1_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_x_v_f64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_x_v_f64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_x_v_f64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_xu_v_f64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mt 
(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_xu_v_f64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_xu_v_f64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_xu_v_f64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl, uint8_t ta) { + return vfwcvt_f_f_v_f64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); +vfloat64m2_t 
test_vfwcvt_f_f_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl, uint8_t ta) {
+  return vfwcvt_f_f_v_f64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl);
+vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl, uint8_t ta) {
+  return vfwcvt_f_f_v_f64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl, uint8_t ta) {
+  return vfwcvt_f_f_v_f64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
index fe6b668..ace4141 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmul.c
@@ -6,6 +6,96 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwmul_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwmul_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwmul_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul_vv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul_vv_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul_vv_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -82,9 +172,99 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { return vfwmul_vf_f64m8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return 
vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -95,7 +275,7 @@ vfloat64m1_t test_vfwmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, @@ -105,7 +285,7 @@ vfloat64m1_t test_vfwmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -116,7 +296,7 @@ vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, @@ -126,7 +306,7 @@ vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -137,7 +317,7 @@ vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, @@ -147,7 +327,7 @@ vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -158,7 +338,7 @@ vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, // CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, @@ -166,182 +346,165 @@ vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, return vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwmul_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwmul_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4(op1, op2, vl); +vfloat32m4_t 
test_vfwmul_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwmul_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwmul_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwmul_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwmul_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwmul_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwmul_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfwmul_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwmul_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwmul_vf_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwmul_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfwmul_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vfwmul_vf_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfwmul_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
index 95c1d9e..59bde72 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwsub.c
@@ -6,6 +6,186 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_vv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vfwsub_wv_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16.i64(<vscale x 1 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_wf_f32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_vv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vfwsub_vf_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vfwsub_wv_f32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16.i64(<vscale x 2 x float> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv_f32m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m8(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -78,7 +258,7 @@ vfloat64m2_t test_vfwsub_wv_f64m2(vfloat64m2_t op1, vfloat32m1_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2 (vfloat64m2_t op1, float op2, size_t vl) { return vfwsub_wf_f64m2(op1, op2, vl); } @@ -87,601 +267,718 @@ vfloat64m2_t test_vfwsub_wf_f64m2(vfloat64m2_t op1, float op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4(vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwsub_vv_f64m4(op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4 (vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_vv_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_vf_f64m4 (vfloat32m2_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wv_f64m4 (vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return vfwsub_wv_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfwsub_wf_f64m4 (vfloat64m4_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t 
test_vfwsub_vv_f64m8 (vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_vv_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_vf_f64m8 (vfloat32m4_t op1, float op2, size_t vl) { + return vfwsub_vf_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wv_f64m8 (vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return vfwsub_wv_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfwsub_wf_f64m8 (vfloat64m8_t op1, float op2, size_t vl) { + return vfwsub_wf_f64m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { + return vfwsub_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) 
{ + return vfwsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { + return vfwsub_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4(vfloat32m2_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4(vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { - return vfwsub_wv_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4(vfloat64m4_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8(vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwsub_vv_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, 
vl); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { - return vfwsub_vf_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8(vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { - return vfwsub_wv_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f64.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { - return vfwsub_wf_f64m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { return vfwsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { return vfwsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, vfloat32mf2_t op2, - size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl) { return vfwsub_wv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, float op2, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { return vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { return vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { return vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vfloat32m1_t op2, - size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { return vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, float op2, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { return vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { return vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { return vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vfloat32m2_t op2, - size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_m (vbool16_t mask, 
vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { return vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, float op2, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { return vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { return vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { return vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vfloat32m4_t op2, - size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { return vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vfwsub_wf_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, float op2, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { return vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vv_f32mf2 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_vf_f32mf2 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wv_f32mf2 (vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2 (vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2(op1, op2, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f32mf2_mt(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_vf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1 (vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_wv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat16mf2_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1 (vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1(op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2 (vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2 (vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2(op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_mt (vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4 (vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4 (vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4(op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8 (vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_wv_f32m8(op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8 (vfloat32m8_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m8(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwsub_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwsub_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32mf2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfwsub_wv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat16mf4_t op2, size_t vl) { - return vfwsub_wv_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 
op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32mf2_m( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfwsub_wf_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32mf2_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_vf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_vf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat32mf2_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t 
op1, vfloat16mf2_t op2, size_t vl) { - return vfwsub_wv_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m1_m( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m2_m( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwsub_wf_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_wf_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m4_m( +// CHECK-RV64-LABEL: @test_vfwsub_vv_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfwsub_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vv_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwsub_vf_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl, uint8_t ta) { + return vfwsub_vf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_vf_f32m8_m( +// CHECK-RV64-LABEL: @test_vfwsub_wv_f64m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl, uint8_t ta) { + return vfwsub_wv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vfwsub_wv_f32m8_m( +// CHECK-RV64-LABEL: 
@test_vfwsub_wf_f64m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat32m8_t test_vfwsub_wv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) {
-  return vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfwsub_wf_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl, uint8_t ta) {
+  return vfwsub_wf_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vfwsub_wf_f32m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfwsub_wf_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) {
-  return vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
index 9da0351..bda35be 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
@@ -2,7 +2,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -446,6 +447,66 @@ vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) {
   return vle64_v_u64m8(base, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) {
+  return vle16_v_f16mf4(base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 *base, size_t vl) {
+  return vle16_v_f16mf2(base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) {
+  return vle16_v_f16m1(base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { + return vle16_v_f16m2(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) { + return vle16_v_f16m4(base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { + return vle16_v_f16m8(base, vl); +} + // CHECK-RV64-LABEL: @test_vle32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -539,7 +600,7 @@ vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { // CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { @@ -549,7 +610,7 @@ vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { @@ -559,7 +620,7 @@ vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { @@ -569,7 +630,7 @@ vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_ // CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t 
maskedoff, const int8_t *base, size_t vl) { @@ -579,7 +640,7 @@ vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { @@ -589,7 +650,7 @@ vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { @@ -599,7 +660,7 @@ vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { @@ -609,7 +670,7 @@ vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *b // CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { @@ -619,7 +680,7 @@ vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { @@ -629,7 +690,7 @@ vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { @@ -639,7 +700,7 @@ vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int1 // CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { @@ -649,7 +710,7 @@ vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16 // CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { @@ -659,7 +720,7 @@ vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16 // CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { @@ -669,7 +730,7 @@ vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16 // CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { @@ -679,7 +740,7 @@ vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { @@ -689,7 +750,7 @@ vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int3 // CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { @@ -699,7 +760,7 @@ vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int3 // CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { @@ -709,7 +770,7 @@ vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32 // CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { @@ -719,7 +780,7 @@ vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32 // CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { @@ -729,7 +790,7 @@ vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int6 // CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { @@ -739,7 +800,7 @@ vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int6 // 
CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { @@ -749,7 +810,7 @@ vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int6 // CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { @@ -759,7 +820,7 @@ vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64 // CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { @@ -769,7 +830,7 @@ vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { @@ -779,7 +840,7 @@ vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { @@ -789,7 +850,7 @@ vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { @@ -799,7 +860,7 @@ vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { @@ -809,7 +870,7 @@ vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { @@ -819,7 +880,7 @@ vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { @@ -829,7 +890,7 @@ vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t // CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { @@ -839,7 +900,7 @@ vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) 
{ @@ -849,7 +910,7 @@ vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { @@ -859,7 +920,7 @@ vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { @@ -869,7 +930,7 @@ vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { @@ -879,7 +940,7 @@ vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { @@ -889,7 +950,7 @@ vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { @@ -899,7 +960,7 @@ vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { @@ -909,7 +970,7 @@ vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { @@ -919,7 +980,7 @@ vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { @@ -929,7 +990,7 @@ vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { @@ -939,7 +1000,7 @@ vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uin // CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { @@ -949,7 +1010,7 @@ vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { @@ -959,7 +1020,7 @@ vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { @@ -969,17 +1030,77 @@ vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const ui // CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { return vle64_v_u64m8_m(mask, maskedoff, base, vl); } +// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vle16_v_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vle16_v_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vle16_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vle16_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: 
@test_vle16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vle16_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vle16_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) { + return vle16_v_f16m8_m(mask, maskedoff, base, vl); +} + // CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { @@ -989,7 +1110,7 @@ vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, con // CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { @@ -999,7 +1120,7 @@ vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { @@ -1009,7 +1130,7 @@ vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { @@ -1019,7 +1140,7 @@ vfloat32m4_t 
test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const f // CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { @@ -1029,7 +1150,7 @@ vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const f // CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { @@ -1039,7 +1160,7 @@ vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { @@ -1049,7 +1170,7 @@ vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { @@ -1059,199 +1180,599 @@ vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { return vle64_v_f64m8_m(mask, maskedoff, base, vl); } -// CHECK-RV64-LABEL: @test_vle1_v_b1( +// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vle1.nxv64i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool1_t test_vle1_v_b1(const uint8_t *base, size_t vl) { - return vle1_v_b1(base, vl); +vint8mf8_t test_vle8_v_i8mf8_mt(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b2( +// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv32i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool2_t test_vle1_v_b2(const uint8_t *base, size_t vl) { - return vle1_v_b2(base, vl); +vint8mf4_t test_vle8_v_i8mf4_mt(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b4( +// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv16i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool4_t test_vle1_v_b4(const uint8_t *base, size_t vl) { - return vle1_v_b4(base, vl); +vint8mf2_t test_vle8_v_i8mf2_mt(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b8( +// CHECK-RV64-LABEL: @test_vle8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv8i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool8_t test_vle1_v_b8(const uint8_t *base, size_t vl) { - return vle1_v_b8(base, vl); +vint8m1_t test_vle8_v_i8m1_mt(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b16( +// CHECK-RV64-LABEL: @test_vle8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv4i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool16_t test_vle1_v_b16(const uint8_t *base, size_t vl) { - return vle1_v_b16(base, vl); +vint8m2_t test_vle8_v_i8m2_mt(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b32( +// CHECK-RV64-LABEL: @test_vle8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv2i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool32_t test_vle1_v_b32(const uint8_t *base, size_t vl) { - return vle1_v_b32(base, vl); +vint8m4_t test_vle8_v_i8m4_mt(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle1_v_b64( +// CHECK-RV64-LABEL: @test_vle8_v_i8m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle1.nxv1i1.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vbool64_t test_vle1_v_b64(const uint8_t *base, size_t vl) { - return vle1_v_b64(base, vl); +vint8m8_t test_vle8_v_i8m8_mt(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl, uint8_t ta) { + return vle8_v_i8m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vle16_v_f16mf4(const _Float16 *base, size_t vl) { - return vle16_v_f16mf4(base, vl); +vint16mf4_t test_vle16_v_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vle16_v_f16mf2(const _Float16 
*base, size_t vl) { - return vle16_v_f16mf2(base, vl); +vint16mf2_t test_vle16_v_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m1( +// CHECK-RV64-LABEL: @test_vle16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vle16_v_f16m1(const _Float16 *base, size_t vl) { - return vle16_v_f16m1(base, vl); +vint16m1_t test_vle16_v_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m2( +// CHECK-RV64-LABEL: @test_vle16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vle16_v_f16m2(const _Float16 *base, size_t vl) { - return vle16_v_f16m2(base, vl); +vint16m2_t test_vle16_v_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m4( +// CHECK-RV64-LABEL: @test_vle16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vle16_v_f16m4(const _Float16 *base, size_t vl) { - return vle16_v_f16m4(base, vl); +vint16m4_t test_vle16_v_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16m8( +// CHECK-RV64-LABEL: @test_vle16_v_i16m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32f16.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vle16_v_f16m8(const _Float16 *base, size_t vl) { - return vle16_v_f16m8(base, vl); +vint16m8_t test_vle16_v_i16m8_mt(vbool2_t mask, vint16m8_t 
maskedoff, const int16_t *base, size_t vl, uint8_t ta) { + return vle16_v_i16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32_v_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl, uint8_t ta) { + return vle32_v_i32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) { + return vle64_v_i64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vle64_v_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_i64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vle64_v_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_i64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vle64_v_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_i64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vle8_v_u8mf8_mt(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8mf8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vle8_v_u8mf4_mt(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vle8_v_u8mf2_mt(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vle8_v_u8m1_mt(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vle8_v_u8m2_mt(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vle8_v_u8m4_mt(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8_v_u8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vle8_v_u8m8_mt(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl, uint8_t ta) {
+  return vle8_v_u8m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vle16_v_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vle16_v_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vle16_v_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vle16_v_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vle16_v_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_u16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vle16_v_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl, uint8_t ta) {
+  return vle16_v_u16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vle32_v_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vle32_v_u32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_u32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vle32_v_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vle32_v_u32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_u32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vle32_v_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vle32_v_u32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vle32_v_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vle32_v_u32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_u32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vle32_v_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl, uint8_t ta) {
+  return vle32_v_u32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vle64_v_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_u64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vle64_v_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_u64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vle64_v_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_u64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vle64_v_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vle64_v_u64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
-vfloat16mf4_t test_vle16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16mf4_m (mask, maskedoff, base, vl);
+vfloat16mf4_t test_vle16_v_f16mf4_mt(vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16mf4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vle16_v_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
-vfloat16mf2_t test_vle16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16mf2_m (mask, maskedoff, base, vl);
+vfloat16mf2_t test_vle16_v_f16mf2_mt(vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vle16_v_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
 //
-vfloat16m1_t test_vle16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16m1_m (mask, maskedoff, base, vl);
+vfloat16m1_t test_vle16_v_f16m1_mt(vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vle16_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
 //
-vfloat16m2_t test_vle16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16m2_m (mask, maskedoff, base, vl);
+vfloat16m2_t test_vle16_v_f16m2_mt(vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vle16_v_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
-vfloat16m4_t test_vle16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16m4_m (mask, maskedoff, base, vl);
+vfloat16m4_t test_vle16_v_f16m4_mt(vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vle16_v_f16m8_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
 //
-vfloat16m8_t test_vle16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl) {
-  return vle16_v_f16m8_m (mask, maskedoff, base, vl);
+vfloat16m8_t test_vle16_v_f16m8_mt(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vle16_v_f16m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+//
+vfloat32mf2_t test_vle32_v_f32mf2_mt(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl, uint8_t ta) {
+  return vle32_v_f32mf2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_f32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
+//
+vfloat32m1_t test_vle32_v_f32m1_mt(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl, uint8_t ta) {
+  return vle32_v_f32m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_f32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
+//
+vfloat32m2_t test_vle32_v_f32m2_mt(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl, uint8_t ta) {
+  return vle32_v_f32m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_f32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
+//
+vfloat32m4_t test_vle32_v_f32m4_mt(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl, uint8_t ta) {
+  return vle32_v_f32m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle32_v_f32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
+//
+vfloat32m8_t test_vle32_v_f32m8_mt(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl, uint8_t ta) {
+  return vle32_v_f32m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vle64_v_f64m1_mt(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl, uint8_t ta) {
+  return vle64_v_f64m1_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64_v_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t
test_vle64_v_f64m2_mt(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl, uint8_t ta) { + return vle64_v_f64m2_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vle64_v_f64m4_mt(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl, uint8_t ta) { + return vle64_v_f64m4_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle64_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vle64_v_f64m8_mt(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl, uint8_t ta) { + return vle64_v_f64m8_mt(mask, maskedoff, base, vl, VE_TAIL_AGNOSTIC); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c index d56c624..c81a052 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c @@ -191,7 +191,7 @@ vuint8m8_t test_vle8ff_v_u8m8 (const uint8_t *base, size_t *new_vl, size_t vl) { // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -204,7 +204,7 @@ vint8mf8_t test_vle8ff_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -217,7 +217,7 @@ vint8mf4_t test_vle8ff_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -230,7 +230,7 @@ vint8mf2_t test_vle8ff_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -243,7 +243,7 @@ vint8m1_t test_vle8ff_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -256,7 +256,7 @@ vint8m2_t test_vle8ff_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -269,7 +269,7 @@ vint8m4_t test_vle8ff_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -282,7 +282,7 @@ vint8m8_t test_vle8ff_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -295,7 +295,7 @@ vuint8mf8_t test_vle8ff_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -308,7 +308,7 @@ vuint8mf4_t test_vle8ff_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -321,7 +321,7 @@ vuint8mf2_t test_vle8ff_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -334,7 +334,7 @@ vuint8m1_t test_vle8ff_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -347,7 +347,7 @@ vuint8m2_t test_vle8ff_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -360,7 +360,7 @@ vuint8m4_t test_vle8ff_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -370,6 +370,188 @@ vuint8m8_t test_vle8ff_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl); } +// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint8mf8_t test_vle8ff_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle8ff_v_i8mf8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint8mf4_t test_vle8ff_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle8ff_v_i8mf4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// 
+vint8mf2_t test_vle8ff_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_i8mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
+//
+vint8m1_t test_vle8ff_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_i8m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+vint8m2_t test_vle8ff_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_i8m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i8>, i64 } @llvm.riscv.vleff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP2]]
+//
+vint8m4_t test_vle8ff_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_i8m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 64 x i8>, i64 } @llvm.riscv.vleff.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 64 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP2]]
+//
+vint8m8_t test_vle8ff_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_i8m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i8>, i64 } @llvm.riscv.vleff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP2]]
+//
+vuint8mf8_t test_vle8ff_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_u8mf8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i8>, i64 } @llvm.riscv.vleff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP2]]
+//
+vuint8mf4_t test_vle8ff_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_u8mf4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i8>, i64 } @llvm.riscv.vleff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP2]]
+//
+vuint8mf2_t test_vle8ff_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_u8mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP2]]
+//
+vuint8m1_t test_vle8ff_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_u8m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i8>, i64 } @llvm.riscv.vleff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP2]]
+//
+vuint8m2_t test_vle8ff_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle8ff_v_u8m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+//
CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint8m4_t test_vle8ff_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle8ff_v_u8m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint8m8_t test_vle8ff_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle8ff_v_u8m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -529,7 +711,7 @@ vuint16m8_t test_vle16ff_v_u16m8 (const uint16_t *base, size_t *new_vl, size_t v // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -542,7 +724,7 @@ vint16mf4_t test_vle16ff_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, cons // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -555,7 +737,7 @@ vint16mf2_t test_vle16ff_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, cons // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -568,7 +750,7 @@ vint16m1_t test_vle16ff_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -581,7 +763,7 @@ vint16m2_t test_vle16ff_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -594,7 +776,7 @@ vint16m4_t test_vle16ff_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -607,7 +789,7 @@ vint16m8_t test_vle16ff_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -620,7 +802,7 @@ vuint16mf4_t test_vle16ff_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, co // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -633,7 +815,7 @@ vuint16mf2_t test_vle16ff_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, co // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -646,7 +828,7 @@ vuint16m1_t test_vle16ff_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -659,7 +841,7 @@ vuint16m2_t test_vle16ff_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -672,7 +854,7 @@ vuint16m4_t test_vle16ff_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // 
CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -682,394 +864,875 @@ vuint16m8_t test_vle16ff_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2( +// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2(base, new_vl, vl); +vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16mf4_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1( +// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1(base, new_vl, vl); +vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16mf2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2( +// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2(base, new_vl, vl); +vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m1_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4( +// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4(base, new_vl, vl); +vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + return vle16ff_v_f16m2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8( +// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8(base, new_vl, vl); +vint16mf4_t test_vle16ff_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle16ff_v_i16mf4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], 
align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
+//
+vint16mf2_t test_vle16ff_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_i16mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
+//
+vint16m1_t test_vle16ff_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_i16m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+vint16m2_t test_vle16ff_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_i16m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
+//
+vint16m4_t test_vle16ff_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_i16m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
+//
+vint16m8_t test_vle16ff_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_i16m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i16>, i64 } @llvm.riscv.vleff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP2]]
+//
+vuint16mf4_t test_vle16ff_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16mf4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i16>, i64 } @llvm.riscv.vleff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP2]]
+//
+vuint16mf2_t test_vle16ff_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i16>, i64 } @llvm.riscv.vleff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP2]]
+//
+vuint16m1_t test_vle16ff_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i16>, i64 } @llvm.riscv.vleff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
+//
+vuint16m2_t test_vle16ff_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x i16>, i64 } @llvm.riscv.vleff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP2]]
+//
+vuint16m4_t test_vle16ff_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x i16>, i64 } @llvm.riscv.vleff.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i16>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP2]]
+//
+vuint16m8_t test_vle16ff_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_u16m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
+//
+vfloat16mf4_t test_vle16ff_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_f16mf4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+//
+vfloat16mf2_t test_vle16ff_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_f16mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle16ff_v_f16m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]],
i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat16m2_t test_vle16ff_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle16ff_v_f16m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat16m4_t test_vle16ff_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle16ff_v_f16m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat16m8_t test_vle16ff_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle16ff_v_f16m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32mf2_t test_vle32ff_v_i32mf2 (const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32mf2(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m1_t test_vle32ff_v_i32m1 (const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m1(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m2_t test_vle32ff_v_i32m2 (const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m2(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m4_t test_vle32ff_v_i32m4 (const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m4(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m8_t test_vle32ff_v_i32m8 (const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m8(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32mf2(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m1(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m2(base, 
new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m4(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m8(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32mf2(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m1(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m2(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m4(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m8(base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , 
i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret [[TMP2]] +// +vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32mf2_t test_vle32ff_v_u32mf2 (const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2(base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m1_t test_vle32ff_v_u32m1 (const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1(base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m2_t test_vle32ff_v_u32m2 (const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2(base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m4_t test_vle32ff_v_u32m4 (const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m4(base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m8_t test_vle32ff_v_u32m8 (const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8(base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { + return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32mf2_t test_vle32ff_v_f32mf2 (const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2(base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { 
+ return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m1_t test_vle32ff_v_f32m1 (const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1(base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m2_t test_vle32ff_v_f32m2 (const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2(base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m4_t test_vle32ff_v_f32m4 (const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4(base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m8_t test_vle32ff_v_f32m8 (const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8(base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { + return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vint32mf2_t test_vle32ff_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl); +vint32mf2_t test_vle32ff_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_i32mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m1_t test_vle32ff_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl); +vint32m1_t test_vle32ff_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_i32m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], 
i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m2_t test_vle32ff_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl); +vint32m2_t test_vle32ff_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_i32m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m4_t test_vle32ff_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl); +vint32m4_t test_vle32ff_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_i32m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vint32m8_t test_vle32ff_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl); +vint32m8_t test_vle32ff_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_i32m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32mf2_t test_vle32ff_v_u32mf2_m (vbool64_t mask, 
vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl); +vuint32mf2_t test_vle32ff_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_u32mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m1_t test_vle32ff_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl); +vuint32m1_t test_vle32ff_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_u32m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m2_t test_vle32ff_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl); +vuint32m2_t test_vle32ff_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_u32m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m4_t test_vle32ff_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return 
vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl); +vuint32m4_t test_vle32ff_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_u32m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vuint32m8_t test_vle32ff_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl) { - return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl); +vuint32m8_t test_vle32ff_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_u32m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32mf2_t test_vle32ff_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl); +vfloat32mf2_t test_vle32ff_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_f32mf2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m1_t test_vle32ff_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl); +vfloat32m1_t test_vle32ff_v_f32m1_mt 
(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_f32m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m2_t test_vle32ff_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl); +vfloat32m2_t test_vle32ff_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_f32m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m4_t test_vle32ff_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl); +vfloat32m4_t test_vle32ff_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vle32ff_v_f32m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat32m8_t test_vle32ff_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl) { - return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl); +vfloat32m8_t test_vle32ff_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + 
return vle32ff_v_f32m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC); } // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1( @@ -1231,7 +1894,7 @@ vfloat64m8_t test_vle64ff_v_f64m8 (const double *base, size_t *new_vl, size_t vl // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1244,7 +1907,7 @@ vint64m1_t test_vle64ff_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1257,7 +1920,7 @@ vint64m2_t test_vle64ff_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1270,7 +1933,7 @@ vint64m4_t test_vle64ff_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const i // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1283,7 +1946,7 @@ vint64m8_t test_vle64ff_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const in // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1296,7 +1959,7 @@ vuint64m1_t test_vle64ff_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1309,7 +1972,7 @@ vuint64m2_t test_vle64ff_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1322,7 +1985,7 @@ vuint64m4_t test_vle64ff_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1335,7 +1998,7 @@ vuint64m8_t test_vle64ff_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1348,7 +2011,7 @@ vfloat64m1_t test_vle64ff_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, con // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1361,7 +2024,7 @@ vfloat64m2_t test_vle64ff_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, con // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1374,7 +2037,7 @@ vfloat64m4_t test_vle64ff_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, con // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 // CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 @@ -1462,80 +2125,184 @@ vfloat16m8_t test_vle16ff_v_f16m8 (const _Float16 *base, size_t *new_vl, size_t return vle16ff_v_f16m8 (base, new_vl, vl); } -// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call { , i64 } @llvm.riscv.vleff.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , i64 } [[TMP1]], 0 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , i64 } [[TMP1]], 1 // CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 -// CHECK-RV64-NEXT: ret [[TMP2]] +// CHECK-RV64-NEXT: ret [[TMP2]] // -vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { - return vle16ff_v_f16mf4_m (mask, maskedoff, base, new_vl, vl); +vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) { + 
+  return vle16ff_v_f16m4_m (mask, maskedoff, base, new_vl, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
 //
-vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vle16ff_v_f16mf2_m (mask, maskedoff, base, new_vl, vl);
+vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m8_m (mask, maskedoff, base, new_vl, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
 //
-vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vle16ff_v_f16m1_m (mask, maskedoff, base, new_vl, vl);
+vint64m1_t test_vle64ff_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_i64m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
 //
-vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vle16ff_v_f16m2_m (mask, maskedoff, base, new_vl, vl);
+vint64m2_t test_vle64ff_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_i64m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
 //
-vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vle16ff_v_f16m4_m (mask, maskedoff, base, new_vl, vl);
+vint64m4_t test_vle64ff_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_i64m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m(
+// CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
 // CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
 //
-vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
-  return vle16ff_v_f16m8_m (mask, maskedoff, base, new_vl, vl);
+vint64m8_t test_vle64ff_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_i64m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x i64>, i64 } @llvm.riscv.vleff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP2]]
+//
+vuint64m1_t test_vle64ff_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_u64m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x i64>, i64 } @llvm.riscv.vleff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
+//
+vuint64m2_t test_vle64ff_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_u64m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x i64>, i64 } @llvm.riscv.vleff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP2]]
+//
+vuint64m4_t test_vle64ff_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_u64m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x i64>, i64 } @llvm.riscv.vleff.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i64>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP2]]
+//
+vuint64m8_t test_vle64ff_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_u64m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x double>, i64 } @llvm.riscv.vleff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP2]]
+//
+vfloat64m1_t test_vle64ff_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_f64m1_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x double>, i64 } @llvm.riscv.vleff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP2]]
+//
+vfloat64m2_t test_vle64ff_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_f64m2_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x double>, i64 } @llvm.riscv.vleff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP2]]
+//
+vfloat64m4_t test_vle64ff_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_f64m4_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x double>, i64 } @llvm.riscv.vleff.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x double>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP2]]
+//
+vfloat64m8_t test_vle64ff_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vle64ff_v_f64m8_mt(mask, maskedoff, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
index df71bad..6c5c86b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c
@@ -942,6 +942,141 @@ vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
   return vlmul_ext_v_u64m4_u64m8(op1);
 }
 
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.experimental.vector.insert.nxv2f16.nxv1f16(<vscale x 2 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2(vfloat16mf4_t op1) {
+  return vlmul_ext_v_f16mf4_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.experimental.vector.insert.nxv4f16.nxv1f16(<vscale x 4 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half>
[[TMP0]] +// +vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1(vfloat16mf4_t op1) { + return vlmul_ext_v_f16mf4_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2(vfloat16mf4_t op1) { + return vlmul_ext_v_f16mf4_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4(vfloat16mf4_t op1) { + return vlmul_ext_v_f16mf4_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8(vfloat16mf4_t op1) { + return vlmul_ext_v_f16mf4_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1(vfloat16mf2_t op1) { + return vlmul_ext_v_f16mf2_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2(vfloat16mf2_t op1) { + return vlmul_ext_v_f16mf2_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4(vfloat16mf2_t op1) { + return vlmul_ext_v_f16mf2_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8(vfloat16mf2_t op1) { + return vlmul_ext_v_f16mf2_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2(vfloat16m1_t op1) { + return vlmul_ext_v_f16m1_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4(vfloat16m1_t op1) { + return vlmul_ext_v_f16m1_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8(vfloat16m1_t op1) { + 
return vlmul_ext_v_f16m1_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4(vfloat16m2_t op1) { + return vlmul_ext_v_f16m2_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8(vfloat16m2_t op1) { + return vlmul_ext_v_f16m2_f16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8(vfloat16m4_t op1) { + return vlmul_ext_v_f16m4_f16m8(op1); +} + // CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) @@ -2018,289 +2153,10 @@ vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { +vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4 (vuint64m8_t op1) { return vlmul_trunc_v_u64m8_u64m4(op1); } -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return vlmul_trunc_v_f32m1_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return vlmul_trunc_v_f32m2_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return vlmul_trunc_v_f32m2_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return vlmul_trunc_v_f32m4_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return vlmul_trunc_v_f32m8_f32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return vlmul_trunc_v_f64m2_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return vlmul_trunc_v_f64m4_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return vlmul_trunc_v_f64m4_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] 
-// -vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return vlmul_trunc_v_f64m8_f64m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f16.nxv1f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_ext_v_f16mf4_f16mf2 (vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv1f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_ext_v_f16mf4_f16m1 (vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv1f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_ext_v_f16mf4_f16m2 (vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv1f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vlmul_ext_v_f16mf4_f16m4 (vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf4_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv1f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vlmul_ext_v_f16mf4_f16m8 (vfloat16mf4_t op1) { - return vlmul_ext_v_f16mf4_f16m8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f16.nxv2f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_ext_v_f16mf2_f16m1 (vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv2f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_ext_v_f16mf2_f16m2 (vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv2f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vlmul_ext_v_f16mf2_f16m4 (vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16mf2_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv2f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vlmul_ext_v_f16mf2_f16m8 (vfloat16mf2_t op1) { - return vlmul_ext_v_f16mf2_f16m8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f16.nxv4f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_ext_v_f16m1_f16m2 
(vfloat16m1_t op1) { - return vlmul_ext_v_f16m1_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv4f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vlmul_ext_v_f16m1_f16m4 (vfloat16m1_t op1) { - return vlmul_ext_v_f16m1_f16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m1_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv4f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vlmul_ext_v_f16m1_f16m8 (vfloat16m1_t op1) { - return vlmul_ext_v_f16m1_f16m8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f16.nxv8f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vlmul_ext_v_f16m2_f16m4 (vfloat16m2_t op1) { - return vlmul_ext_v_f16m2_f16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m2_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv8f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vlmul_ext_v_f16m2_f16m8 (vfloat16m2_t op1) { - return vlmul_ext_v_f16m2_f16m8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_ext_v_f16m4_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32f16.nxv16f16( undef, [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m8_t test_vlmul_ext_v_f16m4_f16m8 (vfloat16m4_t op1) { - return vlmul_ext_v_f16m4_f16m8(op1); -} - // CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) @@ -2435,3 +2291,148 @@ vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2 (vfloat16m8_t op1) { vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4 (vfloat16m8_t op1) { return vlmul_trunc_v_f16m8_f16m4(op1); } + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2 (vfloat32m1_t op1) { + return vlmul_trunc_v_f32m1_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2 (vfloat32m2_t op1) { + return vlmul_trunc_v_f32m2_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1 (vfloat32m2_t op1) { + return vlmul_trunc_v_f32m2_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2 (vfloat32m4_t op1) { + return 
vlmul_trunc_v_f32m4_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1 (vfloat32m4_t op1) { + return vlmul_trunc_v_f32m4_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2 (vfloat32m4_t op1) { + return vlmul_trunc_v_f32m4_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2 (vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1 (vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2 (vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4 (vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1 (vfloat64m2_t op1) { + return vlmul_trunc_v_f64m2_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1 (vfloat64m4_t op1) { + return vlmul_trunc_v_f64m4_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2 (vfloat64m4_t op1) { + return vlmul_trunc_v_f64m4_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1 (vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2 (vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4 (vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m4(op1); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c index 92d672d..f0ea80d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c @@ -1566,6 +1566,216 @@ vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size return vloxei64_v_u64m8(base, bindex, vl); } +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vfloat16m8_t test_vloxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_f16m8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_f16m8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_f16m2(base, bindex, vl); +} + // CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1912,2336 +2122,4247 @@ vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_ // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8 (const double *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, 
vuint8m1_t bindex, size_t vl) { return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_m (vbool8_t mask, vint8m1_t 
maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
  return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
-vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vloxei32_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
  return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
//
-vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vloxei64_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
  return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
//
-vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vloxei64_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
  return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
//
-vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vloxei64_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
  return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
//
-vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vloxei64_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
  return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
-vint16mf4_t test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vloxei8_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
  return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
-vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vloxei8_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
  return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
-vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vloxei8_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
  return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
-vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vloxei8_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
  return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
-vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vloxei8_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
  return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
-vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vloxei8_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
  return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
-vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vloxei16_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
  return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
-vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vloxei16_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
  return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
-vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vloxei16_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
  return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
-vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vloxei16_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
  return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
//
-vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vloxei16_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
  return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
//
-vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vloxei16_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
  return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
//
-vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vloxei32_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
  return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
//
-vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vloxei32_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
  return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
//
-vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vloxei32_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
  return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
-vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vloxei32_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
  return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16>
@llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { 
+vint32m4_t test_vloxei8_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: 
@test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], 
* [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const 
int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t 
maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, 
vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] 
// -vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t 
bindex, size_t vl) { return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t 
bindex, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u16m4_m(mask, maskedoff, 
base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, 
vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return 
vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
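
The masked _m builtins in these hunks keep their C signatures unchanged (the diff only inserts a space before the parameter list); what changes is the emitted IR, which now carries a trailing constant i64 1 tail-policy operand (1 encodes tail agnostic) on every @llvm.riscv.vloxei.mask call. A minimal usage sketch against the same vloxei16_v_u32m2_m signature these tests exercise; the wrapper name gather_u32 is hypothetical and not part of the test file:

#include <riscv_vector.h>

// Masked indexed load: elements whose mask bit is 0 are taken from
// maskedoff; tail elements (index >= vl) follow the tail-agnostic
// policy that the CHECK lines above encode as the final "i64 1".
vuint32m2_t gather_u32(vbool16_t mask, vuint32m2_t maskedoff,
                       const uint32_t *base, vuint16m1_t bindex,
                       size_t vl) {
  return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
}
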
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t 
test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t 
bindex, size_t vl) { return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vloxei64_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vloxei64_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vloxei64_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
-vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vloxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i8>
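
From this hunk onward the file's f32 cases are rewritten in place as the new _Float16 coverage. A sketch of how the added half-precision builtin is called, assuming a toolchain where the zfh support and the vfloat16 vector types are enabled; the wrapper name gather_f16 is hypothetical:

// Same shape as the integer cases above, now for _Float16 data; the
// checked IR again ends with the constant tail-policy operand i64 1.
vfloat16mf4_t gather_f16(vbool64_t mask, vfloat16mf4_t maskedoff,
                         const _Float16 *base, vuint8mf8_t bindex,
                         size_t vl) {
  return vloxei8_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
}
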
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f16mf2_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f16m1_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f16m2_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t 
mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_f16m4_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16m8_t test_vloxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vloxei8_v_f16m8_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f16mf4_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f16mf2_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f16m1_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f16m2_m(mask, maskedoff, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxei16_v_f16m4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8_m 
(vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vloxei16_v_f16m8_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxei32_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxei32_v_f16m1_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxei32_v_f16m2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxei32_v_f16m4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxei64_v_f16mf4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vloxei64_v_f16mf2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxei64_v_f16m1_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxei64_v_f16m2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei8_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei8_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei8_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei8_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei8_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { + return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei16_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vloxei16_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei16_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei16_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vloxei16_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { + 
return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vloxei32_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, 
size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl); } // 
CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vloxei64_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
   return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei64_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei64_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei64_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
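For reference while reading the `_mt` checks that follow: the masked `_m` builtins keep their C signature and now always lower with a trailing tail-agnostic policy operand (the `i64 1` above), while the new `_mt` builtins expose that operand as an extra argument. A minimal usage sketch, assuming the VE_TAIL_AGNOSTIC macro (value 1, as used in the tests below) and an undisturbed counterpart spelled VE_TAIL_UNDISTURBED (value 0, an assumed name) from this patch's riscv_vector.h:

#include <riscv_vector.h>

// Masked indexed load, tail agnostic: the _m builtin fixes the policy,
// so the intrinsic call is emitted with a trailing 'i64 1'.
vint8mf8_t gather_ta(vbool64_t mask, vint8mf8_t maskedoff,
                     const int8_t *base, vuint8mf8_t bindex, size_t vl) {
  return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
}

// Masked indexed load with an explicit policy: the last argument of the
// _mt builtin selects it. VE_TAIL_UNDISTURBED (assumed spelling of the
// policy-0 macro) keeps the tail elements of maskedoff instead of
// leaving them unspecified.
vint8mf8_t gather_tu(vbool64_t mask, vint8mf8_t maskedoff,
                     const int8_t *base, vuint8mf8_t bindex, size_t vl) {
  return vloxei8_v_i8mf8_mt(mask, maskedoff, base, bindex, vl,
                            VE_TAIL_UNDISTURBED);
}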
-// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vfloat16mf4_t test_vloxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxei8_v_f16mf4 (base, bindex, vl);
+vint8mf8_t test_vloxei8_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vfloat16mf2_t test_vloxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxei8_v_f16mf2 (base, bindex, vl);
+vint8mf4_t test_vloxei8_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vfloat16m1_t test_vloxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxei8_v_f16m1 (base, bindex, vl);
+vint8mf2_t test_vloxei8_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vfloat16m2_t test_vloxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
-  return vloxei8_v_f16m2 (base, bindex, vl);
+vint8m1_t test_vloxei8_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-//
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4 (base, bindex, vl); +vint8m2_t test_vloxei8_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vloxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8 (base, bindex, vl); +vint8m4_t test_vloxei8_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_i8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4 (base, bindex, vl); +vint8m8_t test_vloxei8_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_i8m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2 (base, bindex, vl); +vint8mf8_t test_vloxei16_v_i8mf8_mt (vbool64_t mask, 
vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1 (base, bindex, vl); +vint8mf4_t test_vloxei16_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2 (base, bindex, vl); +vint8mf2_t test_vloxei16_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4 (base, bindex, vl); +vint8m1_t test_vloxei16_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vloxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8 (base, bindex, vl); +vint8m2_t test_vloxei16_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4 (base, bindex, vl); +vint8m4_t test_vloxei16_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_i8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2 (base, bindex, vl); +vint8mf8_t test_vloxei32_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei32_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei32_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vloxei32_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vloxei32_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vloxei64_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vloxei64_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vloxei64_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8m1_t test_vloxei64_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vloxei8_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vloxei8_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vloxei8_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vloxei8_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vloxei8_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m8_t test_vloxei8_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vloxei16_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vloxei16_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vloxei16_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vloxei16_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vloxei16_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m8_t test_vloxei16_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vloxei32_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vloxei32_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vloxei32_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vloxei32_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vloxei32_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vloxei64_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vloxei64_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vloxei64_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vloxei64_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vloxei8_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vloxei8_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vloxei8_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vloxei8_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m8_t test_vloxei8_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vloxei16_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vloxei16_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vloxei16_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vloxei16_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m8_t test_vloxei16_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vloxei32_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vloxei32_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vloxei32_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vloxei32_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m8_t test_vloxei32_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vloxei64_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vloxei64_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vloxei64_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vloxei64_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m1_t test_vloxei8_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m2_t test_vloxei8_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m4_t test_vloxei8_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m8_t test_vloxei8_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m1_t test_vloxei16_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m2_t test_vloxei16_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m4_t test_vloxei16_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m8_t test_vloxei16_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m1_t test_vloxei32_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m2_t test_vloxei32_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m4_t test_vloxei32_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m8_t test_vloxei32_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m1_t test_vloxei64_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m2_t test_vloxei64_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m4_t test_vloxei64_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint64m8_t test_vloxei64_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf8_t test_vloxei8_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf4_t test_vloxei8_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf2_t test_vloxei8_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m1_t test_vloxei8_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m2_t test_vloxei8_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m4_t test_vloxei8_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m8_t test_vloxei8_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u8m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf8_t test_vloxei16_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf4_t test_vloxei16_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf2_t test_vloxei16_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m1_t test_vloxei16_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m2_t test_vloxei16_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m4_t test_vloxei16_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf8_t test_vloxei32_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf4_t test_vloxei32_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf2_t test_vloxei32_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m1_t test_vloxei32_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m2_t test_vloxei32_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf8_t test_vloxei64_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf4_t test_vloxei64_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8mf2_t test_vloxei64_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint8m1_t test_vloxei64_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf4_t test_vloxei8_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf2_t test_vloxei8_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m1_t test_vloxei8_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m2_t test_vloxei8_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m4_t test_vloxei8_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m8_t test_vloxei8_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf4_t test_vloxei16_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf2_t test_vloxei16_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m1_t test_vloxei16_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m2_t test_vloxei16_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m4_t test_vloxei16_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m8_t test_vloxei16_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf4_t test_vloxei32_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf2_t test_vloxei32_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m1_t test_vloxei32_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m2_t test_vloxei32_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m4_t test_vloxei32_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf4_t test_vloxei64_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16mf2_t test_vloxei64_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m1_t test_vloxei64_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint16m2_t test_vloxei64_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32mf2_t test_vloxei8_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m1_t test_vloxei8_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m2_t test_vloxei8_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m4_t test_vloxei8_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m8_t test_vloxei8_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32mf2_t test_vloxei16_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m1_t test_vloxei16_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m2_t test_vloxei16_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m4_t test_vloxei16_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m8_t test_vloxei16_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32mf2_t test_vloxei32_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m1_t test_vloxei32_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m2_t test_vloxei32_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m4_t test_vloxei32_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m8_t test_vloxei32_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32mf2_t test_vloxei64_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m1_t test_vloxei64_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m2_t test_vloxei64_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint32m4_t test_vloxei64_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei64_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m1_t test_vloxei8_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m2_t test_vloxei8_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m4_t test_vloxei8_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m8_t test_vloxei8_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei8_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m1_t test_vloxei16_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m2_t test_vloxei16_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m4_t test_vloxei16_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m8_t test_vloxei16_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei16_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vuint64m1_t test_vloxei32_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxei32_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call
@llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei32_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei32_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei32_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vloxei64_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vloxei64_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vloxei64_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vloxei64_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vloxei8_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei8_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei8_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vloxei8_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei8_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei8_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1 (base, bindex, vl); +vfloat16mf4_t test_vloxei16_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vloxei16_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vloxei16_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2 (base, bindex, vl); +vfloat16m2_t test_vloxei16_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4 (base, bindex, vl); +vfloat16m4_t test_vloxei16_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vloxei16_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4 (base, bindex, vl); +vfloat16mf4_t test_vloxei32_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2 (base, bindex, vl); +vfloat16mf2_t test_vloxei32_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1 (base, bindex, vl); +vfloat16m1_t test_vloxei32_v_f16m1_mt (vbool16_t 
mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2 (base, bindex, vl); +vfloat16m2_t test_vloxei32_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vloxei32_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vloxei64_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vloxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vloxei64_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl, 
uint8_t ta) { + return vloxei64_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vloxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vloxei64_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vloxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vloxei64_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vloxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei8_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vloxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vloxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei8_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei8_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei8_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // 
-vfloat16m1_t test_vloxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei8_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei16_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei16_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vloxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vloxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei16_v_f32m2_mt (vbool16_t mask, vfloat32m2_t 
maskedoff, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei16_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei16_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei32_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_mt( 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei32_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vloxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vloxei32_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vloxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxei64_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vloxei32_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vloxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vloxei32_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vloxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vloxei64_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vloxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vloxei64_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vloxei64_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vloxei64_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vloxei64_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxei64_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei8_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei8_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei8_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei8_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxei8_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei16_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei16_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei16_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vloxei16_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxei16_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vloxei32_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vloxei32_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vloxei32_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxei32_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei32_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxei32_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vloxei64_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxei64_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vloxei64_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxei64_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vloxei64_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxei64_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vloxei64_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vloxei64_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
index 1f61511..eadafb8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg.c
@@ -12335,537 +12335,3000 @@ void test_vloxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
 return vloxseg2ei64_v_u64m4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl);
+void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:
[[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1(v0, v1, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return 
vloxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); +void test_vloxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); +void test_vloxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); -} +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return 
vloxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, 
size_t vl) { + return vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex,
size_t vl) { + return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret 
void +// +void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// 
+void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, 
vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const 
_Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 
+// CHECK-RV64:         fields 1-4 of [[TMP0]] stored to [[V1]]..[[V4]], align 2; ret void
+//
+void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 2; ret void
+//
+void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 2; ret void
+//
+void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 2; ret void
+//
+void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 2; ret void
+//
+void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 2; ret void
+//
+void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 2; ret void
+//
+void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 5 x <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-4 of [[TMP0]] stored to [[V0]]..[[V4]], align 2; ret void
+//
+void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 2 x half> } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 2; ret void
+//
+void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 2 x half> } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 2; ret void
+//
+void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 2 x half> } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 2; ret void
+//
+void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 2; ret void
+//
+void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 2; ret void
+//
+void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 4 x half> } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 2; ret void
+//
+void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 5 x <vscale x 4 x half> } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-4 of [[TMP0]] stored to [[V0]]..[[V4]], align 2; ret void
+//
+void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 4 x half> } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 2; ret void
+//
+void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 4 x half> } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 2; ret void
+//
+void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 4 x half> } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 2; ret void
+//
+void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 8 x half> } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 2; ret void
+//
+void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 2; ret void
+//
+void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 2; ret void
+//
+void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 4; ret void
+//
+void test_vloxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 4; ret void
+//
+void test_vloxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 5 x <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-4 of [[TMP0]] stored to [[V0]]..[[V4]], align 4; ret void
+//
+void test_vloxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 4; ret void
+//
+void test_vloxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 4; ret void
+//
+void test_vloxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 4; ret void
+//
+void test_vloxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 2 x float> } @llvm.riscv.vloxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 4; ret void
+//
+void test_vloxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 2 x float> } @llvm.riscv.vloxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 4; ret void
+//
+void test_vloxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 5 x <vscale x 2 x float> } @llvm.riscv.vloxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-4 of [[TMP0]] stored to [[V0]]..[[V4]], align 4; ret void
+//
+void test_vloxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 2 x float> } @llvm.riscv.vloxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 4; ret void
+//
+void test_vloxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 4; ret void
+//
+void test_vloxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 4; ret void
+//
+void test_vloxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 4; ret void
+//
+void test_vloxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 4 x float> } @llvm.riscv.vloxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 4; ret void
+//
+void test_vloxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 8 x float> } @llvm.riscv.vloxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 1 x float> } @llvm.riscv.vloxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 3 x <vscale x 1 x float> } @llvm.riscv.vloxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-2 of [[TMP0]] stored to [[V0]]..[[V2]], align 4; ret void
+//
+void test_vloxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 4 x <vscale x 1 x float> } @llvm.riscv.vloxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-3 of [[TMP0]] stored to [[V0]]..[[V3]], align 4; ret void
+//
+void test_vloxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 5 x <vscale x 1 x float> } @llvm.riscv.vloxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-4 of [[TMP0]] stored to [[V0]]..[[V4]], align 4; ret void
+//
+void test_vloxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 6 x <vscale x 1 x float> } @llvm.riscv.vloxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-5 of [[TMP0]] stored to [[V0]]..[[V5]], align 4; ret void
+//
+void test_vloxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 7 x <vscale x 1 x float> } @llvm.riscv.vloxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-6 of [[TMP0]] stored to [[V0]]..[[V6]], align 4; ret void
+//
+void test_vloxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 8 x <vscale x 1 x float> } @llvm.riscv.vloxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-7 of [[TMP0]] stored to [[V0]]..[[V7]], align 4; ret void
+//
+void test_vloxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1(
+// CHECK-RV64:         [[TMP0:%.*]] = call { 2 x <vscale x 2 x float> } @llvm.riscv.vloxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64:         fields 0-1 of [[TMP0]] stored to [[V0]]..[[V1]], align 4; ret void
+//
+void test_vloxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f32m1(v0, v1, base, bindex, vl);
+}
+
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_f32m4 (vfloat32m4_t *v0, 
vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2(
+//
+void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2(
+//
+void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2(
+//
+void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1(
+//
+void test_vloxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1(
+//
+void test_vloxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1(
+//
+void test_vloxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1(
+//
+void test_vloxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1(
+//
+void test_vloxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1(
+//
+void test_vloxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1(
+//
+void test_vloxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2(
+//
+void test_vloxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2(
+//
+void test_vloxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2(
+//
+void test_vloxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4(
+//
+void test_vloxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2(
+//
+void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2(
+//
+void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2(
+//
+void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2(
+//
+void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2(
+//
+void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2(
+//
+void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2(
+//
+void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1(
+//
+void test_vloxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1(
+//
+void test_vloxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1(
+//
+void test_vloxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1(
+//
+void test_vloxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1(
@@ -12873,15932 +15336,29427 @@ void test_vloxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
-void test_vloxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1(
+//
+void test_vloxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1(
+//
+void test_vloxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2(
+//
+void test_vloxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2(
+//
+void test_vloxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2(
+//
+void test_vloxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4(
+//
+void test_vloxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1(
+//
+void test_vloxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f64m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1(
+//
+void test_vloxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1(
+//
+void test_vloxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1(
+//
+void test_vloxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1(
+//
+void test_vloxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1(
+//
+void test_vloxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1(
+//
+void test_vloxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2(
+//
+void test_vloxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f64m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2(
+//
+void test_vloxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2(
+//
+void test_vloxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4(
+//
+void test_vloxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f64m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1(
+//
+void test_vloxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f64m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1(
+//
+void test_vloxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1(
+//
+void test_vloxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1(
+//
+void test_vloxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1(
+//
+void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1(
+//
+void test_vloxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1(
+//
+void test_vloxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2(
+//
+void test_vloxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f64m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2(
+//
+void test_vloxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2(
+//
+void test_vloxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4(
+//
+void test_vloxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f64m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1(
+//
+void test_vloxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f64m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1(
+//
+void test_vloxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1(
+//
+void test_vloxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1(
+//
+void test_vloxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1(
+//
+void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1(
+//
+void test_vloxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1(
+//
+void test_vloxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base,
vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4( +// 
CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f64m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0,
vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { 
+ return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> }
@llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , 
} [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], 
* [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
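
The trailing i64 1 operand in the calls checked above is the tail policy argument this patch threads through to the IR; the encoding appears to mirror the vtype vta bit, where 1 selects tail agnostic. For orientation, the masked builtins exercised here are used from C as in the minimal sketch below; the helper name is hypothetical and an RVV-enabled RV64 toolchain of this era is assumed, but the builtin signature is exactly the one in the tests.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: gather two interleaved i8 fields through 32-bit byte
// offsets. Elements where mask is 0 keep the corresponding maskedoff values;
// the tail policy is supplied by the builtin itself (the i64 1 in the IR).
void gather_two_fields(vint8mf8_t *x, vint8mf8_t *y, vbool64_t mask,
                       vint8mf8_t oldx, vint8mf8_t oldy,
                       const int8_t *base, vuint32mf2_t bindex, size_t vl) {
  vloxseg2ei32_v_i8mf8_m(x, y, mask, oldx, oldy, base, bindex, vl);
}
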
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m(
+// CHECK-RV64: call { <vscale x 2 x i8>, {{.*}} } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64({{.*}}, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
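
A masked builtin can also be run with every lane active, so that none of the maskedoff values are written back; a sketch for the i8mf4 shapes above, assuming the v0.10-era vmset_m_b32 builtin that materializes an all-ones vbool32_t mask:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch: all-ones mask, so the masked form behaves like the unmasked one
// while still carrying maskedoff operands (vbool32_t pairs with i8mf4 data).
void gather_all_active(vint8mf4_t *x, vint8mf4_t *y,
                       vint8mf4_t oldx, vint8mf4_t oldy,
                       const int8_t *base, vuint32m1_t bindex, size_t vl) {
  vbool32_t all = vmset_m_b32(vl);  // every element active
  vloxseg2ei32_v_i8mf4_m(x, y, all, oldx, oldy, base, bindex, vl);
}
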
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m(
+// CHECK-RV64: call { <vscale x 4 x i8>, {{.*}} } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64({{.*}}, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
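
These vloxseg builtins are the ordered-indexed segment loads; the unordered vluxseg forms regenerated elsewhere in this patch get the same trailing policy operand, and their checks should differ from the ones here only in the intrinsic name (vluxsegN.mask rather than vloxsegN.mask).
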
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m(
+// CHECK-RV64: call { <vscale x 8 x i8>, {{.*}} } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64({{.*}}, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m(
+// CHECK-RV64: call { <vscale x 16 x i8>, {{.*}} } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64({{.*}}, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m(
+// CHECK-RV64: call { <vscale x 16 x i8>, {{.*}} } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64({{.*}}, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m(
+// CHECK-RV64: call { <vscale x 16 x i8>, {{.*}} } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64({{.*}}, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
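
The 32-bit-index coverage for i8 data ends at m2 and seg4 above: the index operand's EMUL is four times the data's, so at data LMUL 2 the indices already fill a vuint32m8_t register group, and the NF x EMUL <= 8 limit on segment data rules out seg5 and up at that size. The 64-bit-index tests that follow run into the same ceiling one LMUL step earlier.
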
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m(
+// CHECK-RV64: call { <vscale x 1 x i8>, {{.*}} } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64({{.*}}, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64: ret void
+//
+void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
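
When the segment fields sit contiguously in memory, the unit-stride vlseg builtins regenerated elsewhere in this patch express the same deinterleaving without an index vector; a sketch, with the masked vlseg2e8 signature assumed analogous to the indexed forms above:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Sketch: masked unit-stride segment load of interleaved {x, y} byte pairs;
// field k of segment i comes from base[2 * i + k], so no bindex is needed.
void load_pairs(vint8mf8_t *x, vint8mf8_t *y, vbool64_t mask,
                vint8mf8_t oldx, vint8mf8_t oldy,
                const int8_t *base, size_t vl) {
  vlseg2e8_v_i8mf8_m(x, y, mask, oldx, oldy, base, vl);
}
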
vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void
test_vloxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t 
vl) { + return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16m1_m 
(vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, 
vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t 
bindex, size_t vl) { + return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, 
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]],
align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t 
maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 
+// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t 
maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
} [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, 
vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+ return vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+ return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+ return vloxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vloxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2,
vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// 
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>,
, , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t 
maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, 
size_t vl) { + return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 
1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t 
bindex, size_t vl) { + return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t 
*v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, 
base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vloxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vloxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t 
bindex, size_t vl) { + return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, 
vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]],
align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t 
maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, 
vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, 
vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t 
maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { + return vloxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf4_m(v0, 
v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, 
maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
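The trailing `i64 1` on each masked intrinsic call in the CHECK lines above is the tail policy operand these tests exercise (1 selects tail agnostic); the `_m` builtins themselves take no extra argument, the frontend appends the operand during lowering. A minimal sketch of a call site that produces exactly this shape of IR, with invented variable names:

```c
#include <riscv_vector.h>

// Hypothetical call site: masked two-field segment load of uint16_t
// data through 64-bit byte offsets. Inactive lanes take the maskedoff
// values; tail lanes are agnostic (the lowered call carries `i64 1`).
void sketch(vuint16m2_t *x, vuint16m2_t *y, vbool8_t m,
            vuint16m2_t offx, vuint16m2_t offy,
            const uint16_t *base, vuint64m8_t idx, size_t vl) {
  vloxseg2ei64_v_u16m2_m(x, y, m, offx, offy, base, idx, vl);
}
```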
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
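The maskedoff operands give the masked forms merge semantics. A sketch (assumed pattern, invented names) that reuses the destinations' current contents as maskedoff, so lanes with a clear mask bit keep their old values:

```c
#include <riscv_vector.h>

// Update only the active lanes of *a and *b from memory; inactive
// lanes are preserved because *a and *b are passed back as maskedoff.
void update_active_lanes(vuint32mf2_t *a, vuint32mf2_t *b, vbool64_t m,
                         const uint32_t *base, vuint8mf8_t idx, size_t vl) {
  vloxseg2ei8_v_u32mf2_m(a, b, m, *a, *b, base, idx, vl);
}
```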
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
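Each `v0..vN` output pointer maps one-to-one onto an extractvalue/store pair in the CHECK lines above. A sketch with invented names showing the fields being consumed directly after the load:

```c
#include <riscv_vector.h>

// Load a three-field segment under a mask, then reduce the fields
// into a single vector. vadd_vv_u32mf2 is the plain vector add.
vuint32mf2_t sum_fields(vbool64_t m, vuint32mf2_t off0, vuint32mf2_t off1,
                        vuint32mf2_t off2, const uint32_t *base,
                        vuint8mf8_t idx, size_t vl) {
  vuint32mf2_t f0, f1, f2;
  vloxseg3ei8_v_u32mf2_m(&f0, &f1, &f2, m, off0, off1, off2, base, idx, vl);
  return vadd_vv_u32mf2(vadd_vv_u32mf2(f0, f1, vl), f2, vl);
}
```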
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
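vloxseg* is the ordered indexed form; per the diffstat, the unordered vluxseg* counterparts were updated in the same patch and, assuming the usual naming symmetry, take identical arguments. A sketch with invented names for when access order is irrelevant:

```c
#include <riscv_vector.h>

// Unordered masked indexed segment load; signature mirrors the
// ordered vloxseg2ei8_v_u32mf2_m calls in the tests above.
void gather_unordered(vuint32mf2_t *a, vuint32mf2_t *b, vbool64_t m,
                      vuint32mf2_t offa, vuint32mf2_t offb,
                      const uint32_t *base, vuint8mf8_t idx, size_t vl) {
  vluxseg2ei8_v_u32mf2_m(a, b, m, offa, offb, base, idx, vl);
}
```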
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
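Indexed loads take byte offsets, so an `ei8` index for a two-field uint32_t segment must step by 8 bytes per element, which keeps the addressable window within 255 bytes of `base`. A sketch of building such offsets, assuming the standard `vid`/`vsll` intrinsics and an invented helper name:

```c
#include <riscv_vector.h>

// Byte offsets 0, 8, 16, ... for consecutive {f0, f1} uint32_t pairs.
vuint8mf4_t make_ei8_offsets(size_t vl) {
  vuint8mf4_t i = vid_v_u8mf4(vl);   // element indices 0, 1, 2, ...
  return vsll_vx_u8mf4(i, 3, vl);    // * 8 bytes per pair
}
```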
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
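The mask type in these signatures follows the SEW/LMUL ratio: u32m1 data pairs with vbool32_t (32/1), just as u16m1 pairs with vbool16_t. A sketch, assuming the standard compare intrinsic naming, of producing such a mask:

```c
#include <riscv_vector.h>

// Mask with bits set where a < b (unsigned compare); invented name.
vbool32_t lanes_below(vuint32m1_t a, vuint32m1_t b, size_t vl) {
  return vmsltu_vv_u32m1_b32(a, b, vl);
}
```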
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+  return vloxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vloxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32mf2_m (vuint32mf2_t 
*v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32m1_m (vuint32m1_t *v0, 
vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, 
vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vloxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vloxseg6ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 
0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, 
vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t 
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vloxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { + return vloxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] =
extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); +void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg3ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2( +// CHECK-RV64-LABEL: 
@test_vloxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, 
vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); +void test_vloxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_f32m2(v0, v1, base, bindex, vl);
+void test_vloxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1,
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4(v0, v1, base, bindex, vl); +void test_vloxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl); +void test_vloxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); +void test_vloxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vloxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2( +// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vloxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg2ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1 
(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vloxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf2_m 
(vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f64m1(v0, v1, base, bindex, vl);
+void test_vloxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 4 x half> } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 7 x <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (7 segments extracted and stored to [[V0]]..[[V6]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 7 x <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (7 segments extracted and stored to [[V0]]..[[V6]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f64m2(v0, v1, base, bindex, vl);
+void test_vloxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 2 x double> } @llvm.riscv.vloxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f64m4(v0, v1, base, bindex, vl);
+void test_vloxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 1 x double> } @llvm.riscv.vloxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f64m1(v0, v1, base, bindex, vl);
+void test_vloxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 1 x half> } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 7 x <vscale x 1 x double> } @llvm.riscv.vloxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (7 segments extracted and stored to [[V0]]..[[V6]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 1 x double> } @llvm.riscv.vloxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 7 x <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (7 segments extracted and stored to [[V0]]..[[V6]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 2 x double> } @llvm.riscv.vloxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f64m2(v0, v1, base, bindex, vl);
+void test_vloxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 2 x double> } @llvm.riscv.vloxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 2 x double> } @llvm.riscv.vloxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 4 x double> } @llvm.riscv.vloxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 2)
 // CHECK-RV64-NEXT:    ret void
-//
-void test_vloxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f64m4(v0, v1, base, bindex, vl);
+//
+void test_vloxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 1 x double> } @llvm.riscv.vloxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_f64m1(v0, v1, base, bindex, vl);
+void test_vloxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 3 x <vscale x 1 x double> } @llvm.riscv.vloxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (3 segments extracted and stored to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 2 x half> } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl);
+void test_vloxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 4 x <vscale x 1 x double> } @llvm.riscv.vloxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (4 segments extracted and stored to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 7 x <vscale x 2 x half> } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (7 segments extracted and stored to [[V0]]..[[V6]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 5 x <vscale x 1 x double> } @llvm.riscv.vloxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (5 segments extracted and stored to [[V0]]..[[V4]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 8 x <vscale x 2 x half> } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]] ... [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (8 segments extracted and stored to [[V0]]..[[V7]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 6 x <vscale x 1 x double> } @llvm.riscv.vloxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64:         (6 segments extracted and stored to [[V0]]..[[V5]], align 8)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { 2 x <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         (2 segments extracted and stored to [[V0]]..[[V1]], align 2)
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT:    ret void
//
-void test_vloxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

[The hunk continues with the same mechanical pattern: the remaining unmasked f64 tests (test_vloxseg{7,8}ei32_v_f64m1, test_vloxseg{2,3,4}ei32_v_f64m2, test_vloxseg2ei32_v_f64m4, test_vloxseg{2..8}ei64_v_f64m1, test_vloxseg{2,3,4}ei64_v_f64m2, test_vloxseg2ei64_v_f64m4) and the old-form masked i8 tests (test_vloxseg{2..8}ei8_v_i8mf8_m, test_vloxseg{2,3,4}ei8_v_i8mf4_m) are each replaced by the corresponding masked f16 test (test_vloxseg{3..8}ei32_v_f16m1_m, test_vloxseg{2,3,4}ei32_v_f16m2_m, test_vloxseg2ei32_v_f16m4_m, test_vloxseg{2..8}ei64_v_f16mf4_m, test_vloxseg{2..8}ei64_v_f16mf2_m, test_vloxseg{2,3,4}ei64_v_f16m1_m), whose vloxsegN.mask intrinsic call takes the maskedoff operands first and now ends with the tail-policy operand i64 1.]
@llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t
bindex, size_t vl) { - return vloxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t 
maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t 
*v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) { - return vloxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t 
maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { + return vloxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t 
maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, 
vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t 
maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg7ei16_v_f32m1_m(v0, 
v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
+  return vloxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
[...]
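Every remaining vloxseg hunk in this file follows the pattern above: the updated masked test expects the same `.mask` segment-load intrinsic, now with a trailing `i64 1` operand (at the time of this patch, LLVM's encoding of the tail-agnostic policy), and the stores of f32 results use `align 4` rather than `align 1`. Nothing about the masked builtins' C signatures changes. A minimal usage sketch against the builtin tested above; the table layout and offset vector here are hypothetical, not from the test suite:

#include <riscv_vector.h>
#include <stddef.h>

// Gather vl {x, y} float pairs whose byte offsets from `table` are given in
// `bindex`. Lanes whose mask bit is clear are taken from old_x/old_y; tail
// lanes are unspecified under the tail-agnostic policy these builtins emit.
void gather_xy(vfloat32m2_t *x, vfloat32m2_t *y, vbool16_t mask,
               vfloat32m2_t old_x, vfloat32m2_t old_y,
               const float *table, vuint16m1_t bindex, size_t vl) {
  vloxseg2ei16_v_f32m2_m(x, y, mask, old_x, old_y, table, bindex, vl);
}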
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
[...]
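The seg3 through seg8 variants scale the same contract to more fields per record. A sketch of a three-field deinterleaving gather with the f32m1 builtin exercised above; the record layout and helper name are hypothetical:

#include <riscv_vector.h>
#include <stddef.h>

// Gather vl {x, y, z} triples from an array of 3-float records, given the
// byte offset of each record's start in bindex. The three destination
// vectors receive the deinterleaved fields; masked-off lanes come from
// ox/oy/oz.
void gather_xyz(vfloat32m1_t *x, vfloat32m1_t *y, vfloat32m1_t *z,
                vbool32_t mask,
                vfloat32m1_t ox, vfloat32m1_t oy, vfloat32m1_t oz,
                const float *records, vuint32m1_t bindex, size_t vl) {
  vloxseg3ei32_v_f32m1_m(x, y, z, mask, ox, oy, oz, records, bindex, vl);
}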
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
[...]
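One detail the generated tests take for granted is that `bindex` holds byte offsets, not element indices. A hedged sketch of producing offsets for vl consecutive {x, y} pairs and feeding them to the f32m4 variant this file also covers; it assumes the vid_v_u32m4 and vsll_vx_u32m4 builtins from the same generation of the intrinsic API:

#include <riscv_vector.h>
#include <stddef.h>

// Each {x, y} record is 2 * sizeof(float) = 8 bytes, so record i starts at
// byte offset i << 3.
void load_pairs(vfloat32m4_t *x, vfloat32m4_t *y, vbool8_t mask,
                vfloat32m4_t old_x, vfloat32m4_t old_y,
                const float *base, size_t vl) {
  vuint32m4_t idx  = vid_v_u32m4(vl);            // 0, 1, 2, ...
  vuint32m4_t boff = vsll_vx_u32m4(idx, 3, vl);  // 0, 8, 16, ...
  vloxseg2ei32_v_f32m4_m(x, y, mask, old_x, old_y, base, boff, vl);
}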
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
[...]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
[...]
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
[...]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { + return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vloxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, 
vuint64m2_t bindex, size_t vl) { + return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { + return vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, 
vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { + return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { + return vloxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t 
maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void -// -void test_vloxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +// +void test_vloxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, 
vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { + return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, 
vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t
*v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { + return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { + return vloxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { + return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) 
{ + return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return 
vloxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { + return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { + return vloxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } - -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] 
= extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 + +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { + return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) {
+  return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex,
vl); +void test_vloxseg5ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf4_mt (vint8mf4_t 
*v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const 
int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vloxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t 
bindex, size_t vl) {
- return vloxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei8_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei8_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i8m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
- return vloxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf8_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, 
size_t vl) { - return vloxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, 
vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t 
ta) { + return vloxseg4ei16_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align
2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { 
+ return vloxseg3ei16_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, 
size_t vl) { - return vloxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, 
const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, 
vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 + +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t 
vl) { - return vloxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, 
vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, 
vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_m( +// 
CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_i8m2_mt (vint8m2_t
*v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i8mf8_mt(v0, 
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>,
} [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + 
return vloxseg6ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, 
vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
<vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei8_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>*
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_i16mf2_mt 
(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t 
*base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, 
vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei8_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei8_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
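// ---------------------------------------------------------------------------
// Illustrative usage sketch, not part of the generated tests: the _mt
// builtins exercised above take the tail policy as their trailing argument,
// and it lowers to the trailing i64 operand of the llvm.riscv.vloxseg*.mask
// intrinsic in the CHECK lines (i64 1 for tail agnostic). VE_TAIL_AGNOSTIC
// is taken from the tests; VE_TAIL_UNDISTURBED is assumed here as the
// tail-undisturbed counterpart.
#include <riscv_vector.h>
void gather_seg2_keep_tail(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
                           vint16m1_t maskedoff0, vint16m1_t maskedoff1,
                           const int16_t *base, vuint8mf2_t bindex,
                           size_t vl) {
  // With tail undisturbed, elements past vl are kept from
  // maskedoff0/maskedoff1 instead of being left unspecified.
  vloxseg2ei8_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex,
                         vl, VE_TAIL_UNDISTURBED);
}
// ---------------------------------------------------------------------------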
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i16m1_mt(
// CHECK-RV64-NEXT: entry:
-//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, 
vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei8_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) {
- return vloxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei8_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
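// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated tests: as the commit
// message explains, the unmasked builtins stay tail agnostic, so a caller
// that wants the tail preserved goes through the masked _mt form with an
// all-ones mask. vmset_m_b8() is assumed as the usual all-ones mask builtin
// and VE_TAIL_UNDISTURBED as the tail-undisturbed policy name; only
// VE_TAIL_AGNOSTIC appears in these tests.
#include <riscv_vector.h>
void load_seg2_unmasked_keep_tail(vint16m2_t *v0, vint16m2_t *v1,
                                  vint16m2_t old0, vint16m2_t old1,
                                  const int16_t *base, vuint8m1_t bindex,
                                  size_t vl) {
  vbool8_t all_ones = vmset_m_b8(vl);  // every lane active, so no lane is
                                       // masked off and only the tail
                                       // policy has an effect
  // old0/old1 supply the values kept in the tail elements.
  vloxseg2ei8_v_i16m2_mt(v0, v1, all_ones, old0, old1, base, bindex, vl,
                         VE_TAIL_UNDISTURBED);
}
// ---------------------------------------------------------------------------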
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i16m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei8_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32*
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, 
vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, 
uint8_t ta) { + return vloxseg5ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, 
vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, 
vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei16_v_i16mf2_mt (vint16mf2_t *v0, 
vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>
} [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: 
@test_vloxseg4ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, 
size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg3ei32_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t 
maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i16m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t 
maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_i64m2_m 
(vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return 
vloxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t 
maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t 
*base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]],
* [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
<vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const
int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei8_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei8_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei8_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei8_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei8_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg5ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg3ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg4ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+-// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
 
extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg5ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return 
vloxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, 
vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, 
const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t 
*v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei32_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
[... 24 hunks of identical shape elided: test_vloxseg{6,7,8}ei16_v_u8m1_m, test_vloxseg{2,3,4}ei16_v_u8m2_m, test_vloxseg2ei16_v_u8m4_m, test_vloxseg{2..8}ei32_v_u8mf8_m, test_vloxseg{2..8}ei32_v_u8mf4_m, and test_vloxseg{2,3,4}ei32_v_u8mf2_m become test_vloxseg{6,7,8}ei32_v_i32m1_mt, test_vloxseg{2,3,4}ei32_v_i32m2_mt, test_vloxseg2ei32_v_i32m4_mt, test_vloxseg{2..8}ei64_v_i32mf2_mt, test_vloxseg{2..8}ei64_v_i32m1_mt, and test_vloxseg{2,3,4}ei64_v_i32m2_mt. In each, the new test adds a trailing uint8_t ta parameter and forwards VE_TAIL_AGNOSTIC, the @llvm.riscv.vloxsegN.mask call gains a final `i64 1` operand, and the result stores become `align 4` for the i32 element type instead of `align 1`. ...]
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, 
vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, 
vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei16_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, 
vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); +void test_vloxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, 
vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void -// -void test_vloxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, 
vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +// +void test_vloxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
<vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_i64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1,
vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg4ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t 
*v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t 
*v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 
1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16.i64(
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
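In the CHECK lines above, VE_TAIL_AGNOSTIC surfaces as the trailing i64 1 operand on the masked vloxsegN intrinsics. A hedged sketch of the opposite request: assuming a tail-undisturbed counterpart constant exists (VE_TAIL_UNDISTURBED is an assumed name here, not taken from this diff), it would be expected to lower to i64 0, and the tail of each result would keep the maskedoff operand's values:

// Illustrative only; VE_TAIL_UNDISTURBED is an assumed counterpart constant.
static void load_seg2_keep_tail(vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask,
                                vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1,
                                const uint8_t *base, vuint16mf2_t bindex,
                                size_t vl) {
  vloxseg2ei16_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex,
                          vl, VE_TAIL_UNDISTURBED);
}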
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
- return vloxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei16_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
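Each vloxsegN..._mt call returns its N de-interleaved fields through the leading v0..v{N-1} out-pointers, as the wrappers above show, so a caller normally passes addresses of locals and consumes the fields directly. A short self-contained sketch under the same assumptions as the earlier examples (vadd_vv_u8mf2 is the ordinary unmasked add from the same intrinsic family):

#include <riscv_vector.h>

// Load a two-field byte structure via 16-bit indices and return the
// element-wise sum of the two fields; tail elements are left agnostic.
static vuint8mf2_t sum_two_fields(vbool16_t mask, vuint8mf2_t maskedoff0,
                                  vuint8mf2_t maskedoff1, const uint8_t *base,
                                  vuint16m1_t bindex, size_t vl) {
  vuint8mf2_t f0, f1;
  vloxseg2ei16_v_u8mf2_mt(&f0, &f1, mask, maskedoff0, maskedoff1, base, bindex,
                          vl, VE_TAIL_AGNOSTIC);
  return vadd_vv_u8mf2(f0, f1, vl);
}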
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei16_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei16_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
- return vloxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
- return vloxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, 
vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t 
maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u8m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, 
const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], 
[[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void 
test_vloxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t 
bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, 
vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t 
maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> }
@llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, 
vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg5ei32_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, 
vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, 
vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t 
maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t 
maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf4_mt( // CHECK-RV64-NEXT: 
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg6.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg7.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vloxseg8.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL:
@test_vloxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } 
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vloxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , 
[vloxseg.c diff, continued: the mechanically generated FileCheck hunks in this span all follow one pattern, shown in full once below. Each remaining masked u32 `_m` test is replaced by the corresponding u16 `_mt` test: the test function gains a trailing `uint8_t ta` parameter, its call site passes `VE_TAIL_AGNOSTIC`, and the expected `@llvm.riscv.vloxsegN.mask.*` intrinsic call gains a trailing `i64 1` tail-policy operand. One representative hunk:]

-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

[The identical rewrite repeats across this span for vloxseg3-8ei8_v_u16mf4_mt, vloxseg2-8ei8_v_u16mf2_mt, vloxseg2-8ei8_v_u16m1_mt, vloxseg2-4ei8_v_u16m2_mt, vloxseg2ei8_v_u16m4_mt, and vloxseg2-5ei16_v_u16mf4_mt, which displace the former vloxseg7-8ei16_v_u32mf2_m, vloxseg2-8ei16_v_u32m1_m, vloxseg2-4ei16_v_u32m2_m, vloxseg2ei16_v_u32m4_m, vloxseg2-8ei32_v_u32mf2_m, and vloxseg2-8ei32_v_u32m1_m tests; the new checks use `align 2` stores for the i16 element types where the old u32 checks used `align 4`, and each new intrinsic call carries the trailing `i64 1` tail-policy operand.]
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
-  return vloxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
*base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vloxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16*
[[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei32_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0,
maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void 
test_vloxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_m( +// 
CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t 
maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vloxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+
return vloxseg2ei32_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vloxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const 
uint64_t *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg5ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , , } @llvm.riscv.vloxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg6ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg7ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// 
CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg8ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg2ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// 
CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, 
vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: 
@test_vloxseg4ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, 
maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vloxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vloxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vloxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei16_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vloxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei16_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
[[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei32_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
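The tests above all pass VE_TAIL_AGNOSTIC, which is what the trailing i64 1 operand in the CHECK lines corresponds to. A minimal usage sketch of one of the new masked _mt builtins follows, assuming only what these tests already show: riscv_vector.h declares the builtin and the VE_TAIL_AGNOSTIC constant. The demo_* function name is illustrative, not part of the patch.

#include <riscv_vector.h>

// Indexed 3-field segment load with an explicit tail policy. With
// VE_TAIL_AGNOSTIC, elements past vl in *v0..*v2 may take any value;
// the tail-undisturbed policy described in the patch summary would
// instead keep the tail of maskedoff0..maskedoff2.
void demo_vloxseg3_tail_policy(vuint32mf2_t *v0, vuint32mf2_t *v1,
                               vuint32mf2_t *v2, vbool64_t mask,
                               vuint32mf2_t maskedoff0,
                               vuint32mf2_t maskedoff1,
                               vuint32mf2_t maskedoff2,
                               const uint32_t *base,
                               vuint32mf2_t bindex, size_t vl) {
  vloxseg3ei32_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1,
                           maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}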
CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, 
vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t 
maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t 
maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, 
vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t 
maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vloxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg6ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
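As the patch summary notes, unmasked builtins stay tail agnostic, so keeping the tail elements means calling the masked _mt form with an all-ones mask. A sketch of that pattern under stated assumptions: vmset_m_b64 is the usual all-ones mask builtin, and the tail-undisturbed policy constant is assumed here to be spelled VE_TAIL_UNDISTURBED; only VE_TAIL_AGNOSTIC actually appears in this test file.

#include <riscv_vector.h>

// Emulate a tail-undisturbed "unmasked" 2-field indexed load: the
// all-ones mask makes every body element active, and the policy
// argument (assumed spelling: VE_TAIL_UNDISTURBED) requests that
// elements past vl keep the values of maskedoff0/maskedoff1.
void demo_keep_tail(vuint32mf2_t *v0, vuint32mf2_t *v1,
                    vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1,
                    const uint32_t *base, vuint64m1_t bindex, size_t vl) {
  vbool64_t all_ones = vmset_m_b64(vl);
  vloxseg2ei64_v_u32mf2_mt(v0, v1, all_ones, maskedoff0, maskedoff1,
                           base, bindex, vl, VE_TAIL_UNDISTURBED);
}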
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t 
maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t 
*v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t 
*v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: 
@test_vloxseg8ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u32m4_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_u64m1_mt(v0, v1, v2, v3, v4, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg7ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t 
maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, 
vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg8ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei8_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_u64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-//
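The same pattern repeats for every index width and LMUL below: each masked `_m` test becomes an `_mt` test whose final argument is the tail policy, and the matching IR check gains a trailing `i64 1` (tail agnostic) operand on the masked intrinsic call. A minimal usage sketch, assuming <riscv_vector.h> exposes the VE_TAIL_AGNOSTIC constant these tests pass (a tail-undisturbed counterpart, called VE_TAIL_UNDISTURBED here, is an assumption, not something this hunk shows):

  #include <riscv_vector.h>
  #include <stdint.h>

  // Hypothetical helper: masked two-field indexed segment load of u64
  // elements. The values of *x and *y past vl are not needed afterwards,
  // so the tail-agnostic policy is passed as the new final argument.
  void gather_u64_pairs(vuint64m1_t *x, vuint64m1_t *y, vbool64_t mask,
                        vuint64m1_t oldx, vuint64m1_t oldy,
                        const uint64_t *base, vuint16mf4_t bindex,
                        size_t vl) {
    vloxseg2ei16_v_u64m1_mt(x, y, mask, oldx, oldy, base, bindex, vl,
                            VE_TAIL_AGNOSTIC);
  }

Passing a tail-undisturbed policy instead would ask for the destination's prior values to be kept beyond vl; masked-off body elements are taken from the maskedoff operands in either case.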
CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void 
test_vloxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, 
vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, 
vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t 
vl) { - return vloxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, 
vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, 
const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, 
vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vloxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) {
-  return vloxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei32_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_u64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) {
-  return vloxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei32_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) {
-  return vloxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+//
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m2_m(v0, v1, 
mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
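For readers tracking the new API at the call site, here is a minimal caller-side sketch of the masked, tail-policy-aware segment load exercised above. The helper name gather_u64_triples is invented, and VE_TAIL_UNDISTURBED is assumed to be the undisturbed counterpart of the VE_TAIL_AGNOSTIC macro the tests use (per the rvv-intrinsic-doc proposal); nothing below is taken verbatim from the patch.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch only. Assumptions: the helper name is invented, and
   VE_TAIL_UNDISTURBED is the undisturbed policy value paired with
   VE_TAIL_AGNOSTIC (which the tests show lowering to `i64 1`). */
void gather_u64_triples(vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2,
                        vbool32_t mask,
                        vuint64m2_t off0, vuint64m2_t off1, vuint64m2_t off2,
                        const uint64_t *base, vuint32m1_t bindex, size_t vl) {
  /* Inactive lanes take the maskedoff values; with the undisturbed tail
     policy, the tail elements keep them as well. */
  vloxseg3ei32_v_u64m2_mt(v0, v1, v2, mask, off0, off1, off2,
                          base, bindex, vl, VE_TAIL_UNDISTURBED);
}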
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
+void test_vloxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

[Hunks of this second shape follow for test_vloxseg{3-8}ei8_v_f16mf4 and test_vloxseg{2,3}ei8_v_f16mf2: each unmasked _Float16 test is replaced by its _mt masked form, the plain @llvm.riscv.vloxseg*.nxv*f16.* call becoming the .mask. variant with maskedoff, mask, and `i64 1` tail-policy operands, while the surrounding extractvalue/store lines remain as diff context.]
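The trailing `i64 1` operand that appears in every updated CHECK line is the tail-policy immediate produced from the builtin's last argument. Below is a hedged sketch of the encoding these tests imply; only the agnostic value is directly visible in the IR here, and the undisturbed value is an assumption.

/* Tail-policy encoding sketch; not copied from the shipped header.
   VE_TAIL_AGNOSTIC == 1 matches the `i64 1` operand in the CHECK lines;
   VE_TAIL_UNDISTURBED == 0 is assumed from the rvv-intrinsic-doc discussion. */
#define VE_TAIL_UNDISTURBED 0  /* tail elements keep the maskedoff values */
#define VE_TAIL_AGNOSTIC    1  /* tail elements may take any value        */

Keeping the policy as a plain integer operand means a constant argument folds straight into the intrinsic call, so instruction selection can emit the matching tu/ta setting in the vsetvli it generates.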
[The same replacement continues through test_vloxseg{4-8}ei8_v_f16mf2, test_vloxseg{2-8}ei8_v_f16m1, and test_vloxseg{2,3}ei8_v_f16m2, and the section closes with:]

-// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -29223,39 +45181,39 @@ void test_vloxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
 // CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
-  return vloxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei8_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
-  return vloxseg2ei8_v_f16m4(v0, v1, base, bindex, vl);
+void test_vloxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei8_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl);
+void test_vloxseg2ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei16_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29264,13 +45222,13 @@ void test_vloxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
- return vloxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29281,13 +45239,13 @@ void test_vloxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29300,13 +45258,13 @@ void test_vloxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, 
vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29321,13 +45279,13 @@ void test_vloxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29344,13 +45302,13 @@ void test_vloxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29369,26 +45327,26 @@ void test_vloxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const 
_Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29397,13 +45355,13 @@ void test_vloxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29414,13 +45372,13 @@ void test_vloxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29433,13 +45391,13 @@ void test_vloxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29454,13 +45412,13 @@ void test_vloxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29477,13 +45435,13 @@ void test_vloxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29502,26 +45460,26 @@ void test_vloxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29530,13 +45488,13 @@ void test_vloxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
} [[TMP0]], 1 @@ -29547,13 +45505,13 @@ void test_vloxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29566,13 +45524,13 @@ void test_vloxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29587,13 +45545,13 @@ void test_vloxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29610,13 +45568,13 @@ void test_vloxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 
@@ -29635,26 +45593,26 @@ void test_vloxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29663,13 +45621,13 @@ void test_vloxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); +void 
test_vloxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29680,39 +45638,39 @@ void test_vloxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); +void test_vloxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); +void test_vloxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29721,13 +45679,13 @@ void test_vloxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29738,13 +45696,13 @@ void test_vloxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return 
vloxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei32_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29757,13 +45715,13 @@ void test_vloxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29778,13 +45736,13 @@ void test_vloxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vloxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg6ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29801,13 +45759,13 @@ void test_vloxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vloxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg7ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29826,26 +45784,26 @@ void test_vloxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vloxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vloxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -29854,13 +45812,13 @@ void test_vloxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -29871,13 +45829,13 @@ void test_vloxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg4ei32_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
@@ -29890,13 +45848,13 @@ void test_vloxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m
// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vloxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vloxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg5ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29911,13 +45869,13 @@ void test_vloxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29934,13 +45892,13 @@ void test_vloxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29959,26 +45917,26 @@ void test_vloxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vloxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29987,13 +45945,13 @@ void test_vloxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30004,13 +45962,13 @@ void test_vloxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30023,13 +45981,13 @@ void test_vloxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30044,13 +46002,13 @@ void test_vloxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30067,13 +46025,13 @@ void test_vloxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30092,26 +46050,26 @@ void test_vloxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], 
<vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg2ei32_v_f16m2(v0, v1, base, bindex, vl);
+void test_vloxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -30120,13 +46078,13 @@ void test_vloxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei32_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
@@ -30137,39 +46095,39 @@ void test_vloxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vloxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vloxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1,
vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); +void test_vloxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); +void test_vloxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 @@ -30178,13 +46136,13 @@ void test_vloxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30195,13 +46153,13 @@ void test_vloxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30213,14 +46171,14 @@ void test_vloxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void -// -void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, 
vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +// +void test_vloxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30235,13 +46193,13 @@ void test_vloxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30258,13 +46216,13 @@ void test_vloxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, 
vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30283,26 +46241,26 @@ void test_vloxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); +void test_vloxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -30311,13 +46269,13 @@ void test_vloxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vloxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30328,13 +46286,13 @@ void test_vloxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return 
vloxseg4ei64_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30347,13 +46305,13 @@ void test_vloxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30368,13 +46326,13 @@ void test_vloxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30391,13 +46349,13 @@ void test_vloxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30416,26 +46374,26 @@ void test_vloxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, 
vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg8ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg2ei64_v_f16m1(v0, v1, base, bindex, vl);
+void test_vloxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei64_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -30444,1971 +46402,2217 @@ void test_vloxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vloxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl);
+void test_vloxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg3ei64_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , 
, , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vloxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vloxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vloxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vloxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vloxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, 
const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m2(v0, v1, base, bindex, vl); +void test_vloxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl); +void test_vloxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +void test_vloxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vloxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret 
-void test_vloxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vloxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg5ei16_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
[... intervening hunks, identical in form: the masked tests test_vloxseg{2-8}ei8_v_f16mf2_m, test_vloxseg{2-8}ei8_v_f16m1_m, test_vloxseg{2-4}ei8_v_f16m2_m, test_vloxseg2ei8_v_f16m4_m, and test_vloxseg{2-6}ei16_v_f16mf4_m are replaced by test_vloxseg{6-8}ei16_v_f32mf2_mt, test_vloxseg{2-8}ei16_v_f32m1_mt, test_vloxseg{2-4}ei16_v_f32m2_mt, test_vloxseg2ei16_v_f32m4_mt, test_vloxseg{2-8}ei32_v_f32mf2_mt, and test_vloxseg{2-5}ei32_v_f32m1_mt; in every hunk the _mt signature appends a trailing uint8_t ta parameter, the call site passes VE_TAIL_AGNOSTIC, and the expected @llvm.riscv.vloxsegN.mask.* call gains a trailing i64 1 tail-policy operand (the stores switch from <vscale x N x half>/align 2 to <vscale x N x float>/align 4 accordingly); the last of these hunks resumes below ...]

-// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
[... CHECK bodies as above: -@llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i16.i64(..., i64 [[VL:%.*]]) / +@llvm.riscv.vloxseg5.mask.nxv2f32.nxv2i32.i64(..., i64 [[VL:%.*]], i64 1) ...]
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vloxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vloxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return 
vloxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // 
CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - 
return vloxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t 
maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vloxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vloxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vloxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
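[Editor's note: the following usage sketch is illustrative and not part of the
 patch. It mirrors the signature of test_vloxseg2ei32_v_f32m2_mt above; the
 wrapper name gather_xy and the headers are assumptions, while the builtin and
 the VE_TAIL_AGNOSTIC macro are exactly as used by these tests. Passing
 VE_TAIL_AGNOSTIC is what lowers to the trailing "i64 1" operand visible in
 the CHECK lines.]

#include <stddef.h>
#include <riscv_vector.h>

// Masked two-field indexed segment load with an explicit tail policy.
// Inactive lanes take their values from maskedoff0/maskedoff1; tail lanes
// are left unspecified because the trailing argument requests tail agnostic.
void gather_xy(vfloat32m2_t *x, vfloat32m2_t *y, vbool16_t mask,
               vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1,
               const float *base, vuint32m2_t bindex, size_t vl) {
  vloxseg2ei32_v_f32m2_mt(x, y, mask, maskedoff0, maskedoff1,
                          base, bindex, vl, VE_TAIL_AGNOSTIC);
}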
[Nine more identical hunks elided, following the same pattern as above:
   test_vloxseg4ei16_v_f16m1_m -> test_vloxseg6ei64_v_f32mf2_mt
   test_vloxseg5ei16_v_f16m1_m -> test_vloxseg7ei64_v_f32mf2_mt
   test_vloxseg6ei16_v_f16m1_m -> test_vloxseg8ei64_v_f32mf2_mt
   test_vloxseg7ei16_v_f16m1_m -> test_vloxseg2ei64_v_f32m1_mt
   test_vloxseg8ei16_v_f16m1_m -> test_vloxseg3ei64_v_f32m1_mt
   test_vloxseg2ei16_v_f16m2_m -> test_vloxseg4ei64_v_f32m1_mt
   test_vloxseg3ei16_v_f16m2_m -> test_vloxseg5ei64_v_f32m1_mt
   test_vloxseg4ei16_v_f16m2_m -> test_vloxseg6ei64_v_f32m1_mt
   test_vloxseg2ei16_v_f16m4_m -> test_vloxseg7ei64_v_f32m1_mt]

-// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vloxseg8.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:
store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei64_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // 
-void test_vloxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: 
@test_vloxseg2ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei8_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16mf2_m( +// 
CHECK-RV64-LABEL: @test_vloxseg6ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei8_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , 
, , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vloxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei8_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei8_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, 
vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei8_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei8_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei8_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei16_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vloxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei16_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, 
vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vloxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vloxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei16_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei16_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei16_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei16_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei16_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 
*base, vuint64m1_t bindex, size_t vl) { - return vloxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vloxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei32_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei32_v_f64m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t 
maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg6ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg7ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg7ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg8ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vloxseg8ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vloxseg2ei32_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei32_v_f64m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vloxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vloxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
- return vloxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask,
vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei32_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vloxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei32_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vloxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vloxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei32_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vloxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vloxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg2ei64_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vloxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t 
*v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg3ei64_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vloxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg4ei64_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , } @llvm.riscv.vloxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vloxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vloxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vloxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vloxseg5ei64_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vloxseg5ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vloxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vloxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg6ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg6ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg7.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vloxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg7ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg7ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vloxseg8.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vloxseg8ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg8ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg8ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vloxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vloxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vloxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg3ei64_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg3ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vloxseg4.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vloxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg4ei64_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vloxseg4ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vloxseg2ei64_v_f64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vloxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vloxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vloxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vloxseg2ei64_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
index 58c6570..95d974d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -486,6 +486,66 @@ vuint64m8_t test_vlse64_v_u64m8(const uint64_t *base, ptrdiff_t bstride,
   return vlse64_v_u64m8(base, bstride, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf4(base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf2(base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m1(base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m2(base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16.i64(<vscale x 16 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m4(base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16.i64(<vscale x 32 x half>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8(const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m8(base, bstride, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -580,763 +640,1187 @@ vfloat64m4_t test_vlse64_v_f64m4(const double *base, ptrdiff_t bstride,
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64.i64(<vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t test_vlse64_v_f64m8(const double *base, ptrdiff_t bstride,
-                                 size_t vl) {
+vfloat64m8_t test_vlse64_v_f64m8 (const double *base, ptrdiff_t bstride, size_t vl) {
   return vlse64_v_f64m8(base, bstride, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t test_vlse8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
-                                const int8_t *base, ptrdiff_t bstride,
-                                size_t vl) {
+vint8mf8_t test_vlse8_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8mf8_m(mask, maskedoff, base, bstride, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t test_vlse8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
-                                const int8_t *base, ptrdiff_t bstride,
-                                size_t vl) {
+vint8mf4_t test_vlse8_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8mf4_m(mask, maskedoff, base, bstride, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t test_vlse8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
-                                const int8_t *base, ptrdiff_t bstride,
-                                size_t vl) {
+vint8mf2_t test_vlse8_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) {
   return vlse8_v_i8mf2_m(mask, maskedoff, base, bstride, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], i64 [[BSTRIDE:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vlse8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m1_t test_vlse8_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_i8m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vlse8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m2_t test_vlse8_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_i8m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vlse8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m4_t test_vlse8_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_i8m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vlse8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - const int8_t *base, ptrdiff_t bstride, - size_t vl) { +vint8m8_t test_vlse8_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_i8m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t test_vlse16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t 
vl) { +vint16mf4_t test_vlse16_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16mf4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t test_vlse16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t test_vlse16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m1_t test_vlse16_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t test_vlse16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m2_t test_vlse16_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t test_vlse16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m4_t test_vlse16_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t test_vlse16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - const int16_t *base, ptrdiff_t bstride, - size_t vl) { +vint16m8_t test_vlse16_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_i16m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t test_vlse32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_i32mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t test_vlse32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m1_t test_vlse32_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_i32m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t test_vlse32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m2_t test_vlse32_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_i32m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t test_vlse32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m4_t test_vlse32_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_i32m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t test_vlse32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - const int32_t *base, ptrdiff_t bstride, - size_t vl) { +vint32m8_t test_vlse32_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_i32m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t test_vlse64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m1_t test_vlse64_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_i64m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t test_vlse64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m2_t test_vlse64_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_i64m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t test_vlse64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { 
+vint64m4_t test_vlse64_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_i64m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t test_vlse64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - const int64_t *base, ptrdiff_t bstride, - size_t vl) { +vint64m8_t test_vlse64_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_i64m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t test_vlse8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8mf8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t test_vlse8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8mf4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t test_vlse8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8mf2_t test_vlse8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t test_vlse8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t test_vlse8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t test_vlse8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t test_vlse8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - const uint8_t *base, ptrdiff_t bstride, - size_t vl) { +vuint8m8_t test_vlse8_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl) { return vlse8_v_u8m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t test_vlse16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16mf4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t test_vlse16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t test_vlse16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t test_vlse16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t test_vlse16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const 
uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t test_vlse16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - const uint16_t *base, ptrdiff_t bstride, - size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl) { return vlse16_v_u16m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t test_vlse32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_u32mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t test_vlse32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_u32m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t test_vlse32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_u32m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t test_vlse32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_u32m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t test_vlse32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - const uint32_t *base, ptrdiff_t bstride, - size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_u32m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t test_vlse64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_u64m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t test_vlse64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_u64m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t test_vlse64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_u64m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t test_vlse64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - const uint64_t *base, ptrdiff_t bstride, - size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_u64m8_m(mask, maskedoff, base, bstride, vl); } +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vlse16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16mf4_m(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vlse16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16mf2_m(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vlse16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m1_m(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vlse16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m2_m(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] 
to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vlse16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m4_m(mask, maskedoff, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vlse16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlse16_v_f16m8_m(mask, maskedoff, base, bstride, vl); +} + // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t test_vlse32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_f32mf2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t test_vlse32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_f32m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t test_vlse32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_f32m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t test_vlse32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m4_t test_vlse32_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_f32m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t test_vlse32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - const float *base, ptrdiff_t bstride, - size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl) { return vlse32_v_f32m8_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t test_vlse64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - const double *base, ptrdiff_t bstride, - size_t vl) { +vfloat64m1_t test_vlse64_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_f64m1_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t test_vlse64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - const double *base, ptrdiff_t bstride, - size_t vl) { +vfloat64m2_t test_vlse64_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_f64m2_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( 
[[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t test_vlse64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - const double *base, ptrdiff_t bstride, - size_t vl) { +vfloat64m4_t test_vlse64_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_f64m4_m(mask, maskedoff, base, bstride, vl); } // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vlse64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - const double *base, ptrdiff_t bstride, - size_t vl) { +vfloat64m8_t test_vlse64_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl) { return vlse64_v_f64m8_m(mask, maskedoff, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vlse16_v_f16mf4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4 (base, bstride, vl); +vint8mf8_t test_vlse8_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8mf8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vlse16_v_f16mf2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2 (base, bstride, vl); +vint8mf4_t test_vlse8_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8mf4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vlse16_v_f16m1 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1 (base, bstride, vl); +vint8mf2_t test_vlse8_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vlse16_v_f16m2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2 (base, bstride, vl); +vint8m1_t test_vlse8_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m4( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vlse16_v_f16m4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4 (base, bstride, vl); +vint8m2_t test_vlse8_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m8( +// CHECK-RV64-LABEL: @test_vlse8_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vlse16_v_f16m8 (const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8 (base, bstride, vl); +vint8m4_t test_vlse8_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m( +// CHECK-RV64-LABEL: 
@test_vlse8_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vlse8_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_i8m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vlse16_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16mf4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vlse16_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vlse16_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vlse16_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vlse16_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vlse16_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_i16m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vlse32_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_i32mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vlse32_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_i32m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vlse32_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_i32m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vlse32_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_i32m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vlse32_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_i32m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vlse64_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_i64m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vlse64_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_i64m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vlse64_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_i64m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vlse64_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_i64m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vlse8_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8mf8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vlse8_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8mf4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vlse8_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t 
maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vlse8_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vlse8_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vlse8_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse8_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vlse8_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse8_v_u8m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vlse16_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16mf4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vlse16_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16mf2_mt(mask, maskedoff, base, bstride, vl, 
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vlse16_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vlse16_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vlse16_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vlse16_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_u16m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vlse32_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_u32mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vlse32_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_u32m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vlse32_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_u32m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vlse32_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_u32m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vlse32_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_u32m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vlse64_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_u64m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vlse64_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_u64m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vlse64_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_u64m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vlse.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vlse64_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_u64m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vlse16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf4_m (mask, maskedoff, base, bstride, vl); +vfloat16mf4_t test_vlse16_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16mf4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vlse16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16mf2_m (mask, maskedoff, base, bstride, vl); +vfloat16mf2_t test_vlse16_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vlse16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m1_m (mask, maskedoff, base, bstride, vl); +vfloat16m1_t test_vlse16_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * 
[[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vlse16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m2_m (mask, maskedoff, base, bstride, vl); +vfloat16m2_t test_vlse16_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vlse16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m4_m (mask, maskedoff, base, bstride, vl); +vfloat16m4_t test_vlse16_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vlse16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlse16_v_f16m8_m (mask, maskedoff, base, bstride, vl); +vfloat16m8_t test_vlse16_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse16_v_f16m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vlse32_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_f32mf2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// 
+vfloat32m1_t test_vlse32_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_f32m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vlse32_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_f32m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); } + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vlse32_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_f32m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse32_v_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vlse32_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse32_v_f32m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vlse64_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_f64m1_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vlse64_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlse64_v_f64m2_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlse64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vlse64_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, 
ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlse64_v_f64m4_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlse64_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vlse64_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlse64_v_f64m8_mt(mask, maskedoff, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
index 50e1141..49db0c5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlseg.c
@@ -1,27 +1,12 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -target-feature +experimental-zvlsseg -disable-O0-optnone \
-// RUN:   -fallow-half-arguments-and-returns -emit-llvm %s -o - \
-// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-zvlsseg \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -35,17 +20,6 @@ void test_vlseg2e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base,
   return vlseg2e8_v_i8mf8(v0, v1, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@
-61,19 +35,6 @@ void test_vlseg3e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, cons return vlseg3e8_v_i8mf8(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -91,21 +52,6 @@ void test_vlseg4e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint return vlseg4e8_v_i8mf8(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -125,23 +71,6 @@ void test_vlseg5e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint return vlseg5e8_v_i8mf8(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -163,25 +92,6 @@ void test_vlseg6e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint return vlseg6e8_v_i8mf8(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -205,27 +115,6 @@ void test_vlseg7e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint return vlseg7e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -251,15 +140,6 @@ void test_vlseg8e8_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint return vlseg8e8_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: 
@test_vlseg2e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -273,17 +153,6 @@ void test_vlseg2e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, return vlseg2e8_v_i8mf4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -299,19 +168,6 @@ void test_vlseg3e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, cons return vlseg3e8_v_i8mf4(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -329,21 +185,6 @@ void test_vlseg4e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint return vlseg4e8_v_i8mf4(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -363,23 +204,6 @@ void test_vlseg5e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint return vlseg5e8_v_i8mf4(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -401,25 +225,6 @@ void test_vlseg6e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint return vlseg6e8_v_i8mf4(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -443,27 +248,6 @@ void test_vlseg7e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint return vlseg7e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -489,15 +273,6 @@ void test_vlseg8e8_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint return vlseg8e8_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -511,17 +286,6 @@ void test_vlseg2e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, return vlseg2e8_v_i8mf2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -537,19 +301,6 @@ void test_vlseg3e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, cons return vlseg3e8_v_i8mf2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -567,21 +318,6 @@ void test_vlseg4e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint return vlseg4e8_v_i8mf2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -601,23 +337,6 @@ void test_vlseg5e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint return vlseg5e8_v_i8mf2(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -639,25 +358,6 @@ void test_vlseg6e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint return vlseg6e8_v_i8mf2(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -681,27 +381,6 @@ void test_vlseg7e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint return vlseg7e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -727,15 +406,6 @@ void test_vlseg8e8_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint return vlseg8e8_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -749,17 +419,6 @@ void test_vlseg2e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, siz return vlseg2e8_v_i8m1(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -775,19 +434,6 @@ void test_vlseg3e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const in return vlseg3e8_v_i8m1(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -805,21 +451,6 @@ void test_vlseg4e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ return vlseg4e8_v_i8m1(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -839,23 +470,6 @@ void test_vlseg5e8_v_i8m1 (vint8m1_t *v0, vint8m1_t 
*v1, vint8m1_t *v2, vint8m1_ return vlseg5e8_v_i8m1(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -877,25 +491,6 @@ void test_vlseg6e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ return vlseg6e8_v_i8m1(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -919,27 +514,6 @@ void test_vlseg7e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ return vlseg7e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -965,15 +539,6 @@ void test_vlseg8e8_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_ return vlseg8e8_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -987,17 +552,6 @@ void test_vlseg2e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, siz return vlseg2e8_v_i8m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1013,19 +567,6 @@ void test_vlseg3e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const in return vlseg3e8_v_i8m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1043,15 +584,6 @@ void test_vlseg4e8_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_ return vlseg4e8_v_i8m2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1065,15 +597,6 @@ void test_vlseg2e8_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, siz return vlseg2e8_v_i8m4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1087,17 +610,6 @@ void test_vlseg2e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *b return vlseg2e16_v_i16mf4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1113,19 +625,6 @@ void test_vlseg3e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg3e16_v_i16mf4(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1143,21 +642,6 @@ void test_vlseg4e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg4e16_v_i16mf4(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1177,23 +661,6 @@ void test_vlseg5e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg5e16_v_i16mf4(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1215,25 +682,6 @@ void test_vlseg6e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg6e16_v_i16mf4(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1257,27 +705,6 @@ void test_vlseg7e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg7e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1303,15 +730,6 @@ void test_vlseg8e16_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, return vlseg8e16_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1325,17 
+743,6 @@ void test_vlseg2e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *b return vlseg2e16_v_i16mf2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1351,19 +758,6 @@ void test_vlseg3e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg3e16_v_i16mf2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1381,21 +775,6 @@ void test_vlseg4e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg4e16_v_i16mf2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1415,23 +794,6 @@ void test_vlseg5e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg5e16_v_i16mf2(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , 
, } @llvm.riscv.vlseg6.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1453,25 +815,6 @@ void test_vlseg6e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg6e16_v_i16mf2(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1495,27 +838,6 @@ void test_vlseg7e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg7e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1541,15 +863,6 @@ void test_vlseg8e16_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, return vlseg8e16_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1563,17 +876,6 @@ void test_vlseg2e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base return vlseg2e16_v_i16m1(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1589,19 +891,6 @@ void test_vlseg3e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, con return vlseg3e16_v_i16m1(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1619,21 +908,6 @@ void test_vlseg4e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin return vlseg4e16_v_i16m1(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1653,23 +927,6 @@ void test_vlseg5e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin return vlseg5e16_v_i16m1(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1691,25 +948,6 @@ void test_vlseg6e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin return vlseg6e16_v_i16m1(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1733,27 +971,6 @@ void test_vlseg7e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin return vlseg7e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1779,15 +996,6 @@ void test_vlseg8e16_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vin return vlseg8e16_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1801,17 +1009,6 @@ void test_vlseg2e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base return vlseg2e16_v_i16m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1827,19 +1024,6 @@ void test_vlseg3e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, con return vlseg3e16_v_i16m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1857,15 +1041,6 @@ void test_vlseg4e16_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vin return vlseg4e16_v_i16m2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i32(i16* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1879,15 +1054,6 @@ void test_vlseg2e16_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base return vlseg2e16_v_i16m4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1901,17 +1067,6 @@ void test_vlseg2e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *b return vlseg2e32_v_i32mf2(v0, v1, base, vl); } -// 
CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1927,19 +1082,6 @@ void test_vlseg3e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg3e32_v_i32mf2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1957,21 +1099,6 @@ void test_vlseg4e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg4e32_v_i32mf2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -1991,23 +1118,6 @@ void test_vlseg5e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg5e32_v_i32mf2(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2029,25 +1139,6 @@ void test_vlseg6e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg6e32_v_i32mf2(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2071,27 +1162,6 @@ void test_vlseg7e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg7e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2117,15 +1187,6 @@ void test_vlseg8e32_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, return vlseg8e32_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2139,17 +1200,6 @@ void test_vlseg2e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base return vlseg2e32_v_i32m1(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2165,19 +1215,6 @@ void test_vlseg3e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, con return vlseg3e32_v_i32m1(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ 
-2195,21 +1232,6 @@ void test_vlseg4e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin return vlseg4e32_v_i32m1(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2229,23 +1251,6 @@ void test_vlseg5e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin return vlseg5e32_v_i32m1(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2267,25 +1272,6 @@ void test_vlseg6e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin return vlseg6e32_v_i32m1(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2309,27 +1295,6 @@ void test_vlseg7e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin return vlseg7e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2355,15 +1320,6 @@ void test_vlseg8e32_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vin return vlseg8e32_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2377,17 +1333,6 @@ void test_vlseg2e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base return vlseg2e32_v_i32m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2403,19 +1348,6 @@ void test_vlseg3e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, con return vlseg3e32_v_i32m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2433,15 +1365,6 @@ void test_vlseg4e32_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vin return vlseg4e32_v_i32m2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i32(i32* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2455,15 +1378,6 @@ void test_vlseg2e32_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base return vlseg2e32_v_i32m4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2477,17 +1391,6 @@ void test_vlseg2e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base return vlseg2e64_v_i64m1(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2503,19 +1406,6 @@ void test_vlseg3e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, con return vlseg3e64_v_i64m1(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2533,21 +1423,6 @@ void test_vlseg4e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin return vlseg4e64_v_i64m1(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2567,23 +1442,6 @@ void test_vlseg5e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin return vlseg5e64_v_i64m1(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2605,25 +1463,6 @@ void test_vlseg6e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin return vlseg6e64_v_i64m1(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2647,27 +1486,6 @@ void test_vlseg7e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin return vlseg7e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2693,15 +1511,6 @@ void test_vlseg8e64_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vin return vlseg8e64_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2715,17 +1524,6 @@ void test_vlseg2e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base return vlseg2e64_v_i64m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2741,19 +1539,6 @@ void test_vlseg3e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, con return vlseg3e64_v_i64m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2771,15 +1556,6 @@ void test_vlseg4e64_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vin return vlseg4e64_v_i64m2(v0, v1, v2, v3, base, vl); } 
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -2793,15 +1569,6 @@ void test_vlseg2e64_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base
   return vlseg2e64_v_i64m4(v0, v1, base, vl);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -2815,17 +1582,6 @@ void test_vlseg2e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *bas
   return vlseg2e8_v_u8mf8(v0, v1, base, vl);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -2841,19 +1597,6 @@ void test_vlseg3e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, c
   return vlseg3e8_v_u8mf8(v0, v1, v2, base, vl);
 }
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -2871,21 +1614,6 @@ void test_vlseg4e8_v_u8mf8 (vuint8mf8_t *v0,
vuint8mf8_t *v1, vuint8mf8_t *v2, v return vlseg4e8_v_u8mf8(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2905,23 +1633,6 @@ void test_vlseg5e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v return vlseg5e8_v_u8mf8(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2943,25 +1654,6 @@ void test_vlseg6e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v return vlseg6e8_v_u8mf8(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -2985,27 +1677,6 @@ void test_vlseg7e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v return vlseg7e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3031,15 +1702,6 @@ void test_vlseg8e8_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, v return vlseg8e8_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3053,17 +1715,6 @@ void test_vlseg2e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *bas return vlseg2e8_v_u8mf4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3079,19 +1730,6 @@ void test_vlseg3e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, c return vlseg3e8_v_u8mf4(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3109,21 +1747,6 @@ void test_vlseg4e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v return vlseg4e8_v_u8mf4(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3143,23 +1766,6 @@ void test_vlseg5e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v return vlseg5e8_v_u8mf4(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3181,25 +1787,6 @@ void test_vlseg6e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v return vlseg6e8_v_u8mf4(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3223,27 +1810,6 @@ void test_vlseg7e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v return vlseg7e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store 
[[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3269,15 +1835,6 @@ void test_vlseg8e8_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, v return vlseg8e8_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3291,17 +1848,6 @@ void test_vlseg2e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *bas return vlseg2e8_v_u8mf2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3317,19 +1863,6 @@ void test_vlseg3e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, c return vlseg3e8_v_u8mf2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3347,21 +1880,6 @@ void test_vlseg4e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v return vlseg4e8_v_u8mf2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3381,23 +1899,6 @@ void test_vlseg5e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v return vlseg5e8_v_u8mf2(v0, v1, v2, v3, v4, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3419,25 +1920,6 @@ void test_vlseg6e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v return vlseg6e8_v_u8mf2(v0, v1, v2, v3, v4, v5, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3461,27 +1943,6 @@ void test_vlseg7e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v return vlseg7e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3507,15 +1968,6 @@ void test_vlseg8e8_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, v return vlseg8e8_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3529,17 +1981,6 @@ void test_vlseg2e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, return vlseg2e8_v_u8m1(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
@@ -3555,19 +1996,6 @@ void test_vlseg3e8_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const
   return vlseg3e8_v_u8m1(v0, v1, v2, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.nxv8i8.i32(i8* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
 // CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])

[... roughly 1,000 further diff lines, hunks @@ -3585,21 +2013,6 @@ through @@ -5427,27 +3048,6 @@, elided: each hunk repeats the deletion shown above (the CHECK-RV32 block plus its trailing "-//" separator is removed; the CHECK-RV64 block is kept as context) for test_vlseg{5,6,7,8}e8_v_u8m1, test_vlseg{2,3,4}e8_v_u8m2, test_vlseg2e8_v_u8m4, test_vlseg{2..8}e16_v_u16mf4, test_vlseg{2..8}e16_v_u16mf2, test_vlseg{2..8}e16_v_u16m1, test_vlseg{2,3,4}e16_v_u16m2, test_vlseg2e16_v_u16m4, test_vlseg{2..8}e32_v_u32mf2, test_vlseg{2..8}e32_v_u32m1, test_vlseg{2,3,4}e32_v_u32m2, test_vlseg2e32_v_u32m4, and test_vlseg{2..8}e64_v_u64m1. The segment struct and pointee types scale with element width and LMUL (nxv16i8, nxv32i8, nxv1i16 through nxv16i16, nxv1i32 through nxv8i32, nxv1i64, nxv2i64), and the store alignment is 1, 2, 4, or 8 for e8, e16, e32, and e64 respectively ...]

@@ -5473,15 +3073,6 @@ void test_vlseg8e64_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2,
   return vlseg8e64_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg2.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -5495,17 +3086,6 @@ void test_vlseg2e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *b return vlseg2e64_v_u64m2(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -5521,19 +3101,6 @@ void test_vlseg3e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, return vlseg3e64_v_u64m2(v0, v1, v2, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -5551,15 +3118,6 @@ void test_vlseg4e64_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, return vlseg4e64_v_u64m2(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i32(i64* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// // CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -5573,9018 +3131,22811 @@ void test_vlseg2e64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *b return vlseg2e64_v_u64m4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue 
{ , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32mf2(v0, v1, base, vl); +void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16mf4(v0, v1, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); +void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl); } -// 
CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2( +void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16mf2(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m1(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m1(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m2(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m2(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m4(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32mf2(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t vl) { + return vlseg3e32_v_f32mf2(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32mf2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t vl) { + return vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t vl) { + return vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t vl) { + return vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t vl) { + return 
vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32m1(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t vl) { + return vlseg3e32_v_f32m1(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t vl) { + return vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg6e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t vl) { + return vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t vl) { + return vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t vl) { + return vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t vl) { + return vlseg2e32_v_f32m2(v0, v1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t vl) { + return vlseg3e32_v_f32m2(v0, v1, v2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t vl) { + return vlseg4e32_v_f32m2(v0, v1, v2, v3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m4(v0, v1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m1(v0, v1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m1(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t vl) {
+  return vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t vl) {
+  return vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t vl) {
+  return vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t vl) {
+  return vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t vl) {
+  return vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m2(v0, v1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m2(v0, v1, v2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t vl) {
+  return vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m4(v0, v1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8mf8(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8mf8(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_i8mf8(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_i8mf8(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8mf8 (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_i8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8mf4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8mf4(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_i8mf4(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_i8mf4(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8mf4 (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_i8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_i8mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_i8mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8mf2 (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_i8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8m1(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_i8m1(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_i8m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8m1 (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_i8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_i8m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8ff_v_i8m2 (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_i8m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8m4 (vint8m4_t *v0, vint8m4_t *v1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16mf4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_i16mf4(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_i16mf4(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_i16mf4(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_i16mf4 (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_i16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_i16mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_i16mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_i16mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , 
, , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf2 (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 
} [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16m1 (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_i16m2 (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_i16m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_i16m4 (vint16m4_t *v0, vint16m4_t *v1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_i32mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
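Usage note (editorial aside, not part of the generated checks): the vlseg<nf>e<eew>ff_v_* builtins exercised above are fault-only-first segment loads. They load up to vl segments of nf fields from base, may stop early at the first element that would fault, and report the number of elements actually loaded through new_vl. A minimal caller sketch follows, using only the builtin signature shown in the tests above; the function and variable names are hypothetical:

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// Hypothetical caller: read up to `n` {x, y} pairs stored interleaved at
// `base`, tolerating a fault past the end of the accessible region.
size_t sketch_read_pairs(const int32_t *base, size_t n) {
  vint32mf2_t x, y;  // one register group per segment field
  size_t new_vl;     // how many elements were actually loaded
  vlseg2e32ff_v_i32mf2(&x, &y, base, &new_vl, n);
  // new_vl <= n; elements of x/y at positions >= new_vl were not loaded
  // and must not be relied upon.
  return new_vl;
}

For the unmasked forms above the tail is always agnostic, which is why these builtins gain no extra tail-policy argument in this patch.
+//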
CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32mf2 (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg2e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_i32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_i32m1 (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_i32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_i32m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_i32m2 (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_i32m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_i32m4 (vint32m4_t *v0, vint32m4_t *v1, const int32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_i32m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_i64m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_i64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_i64m1 (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_i64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_i64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, const int64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_i64m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* 
[[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_i64m2 (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_i64m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_i64m4 (vint64m4_t *v0, vint64m4_t *v1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg2ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8mf8(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg3ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8mf8(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg4ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]],
3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf8(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf8(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf8 (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf8(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } 
[[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf4 (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8mf2(v0, 
v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: 
store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8mf2 (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , 
i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_u8m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv8i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_u8m1 (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_u8m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_u8m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv16i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_u8m2 (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_u8m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv32i8.i64(i8* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_u8m4 (vuint8m4_t *v0, vuint8m4_t *v1, const uint8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_u8m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } 
@llvm.riscv.vlseg6ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf4 (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_u16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_u16mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_u16mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_u16mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_u16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) { + return 
vlseg6e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_u16mf2 (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) { + return 
vlseg8e16ff_v_u16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m1(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16m1(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg5ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_u16m1(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg6ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg7ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg8ff.nxv4i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16ff_v_u16m1 (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_u16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.nxv8i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_u16m2 (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.nxv16i16.i64(i16* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16m4 (vuint16m4_t *v0, vuint16m4_t *v1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_u32mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg5ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_u32mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg6ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32mf2 (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_u32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_u32m1 (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_u32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_u32m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m2 (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8i32.i64(i32* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m4 (vuint32m4_t *v0, vuint32m4_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m4(v0, v1, base, new_vl, vl); +} + +// 
CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_u64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 
8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1 (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: 
store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m2 (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4i64.i64(i64* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf4(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf4(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf4(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf4(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , 
i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], 
* [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf2(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf2(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf2(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t 
*v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_f16m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } @llvm.riscv.vlseg4ff.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_f16m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg2ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32mf2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg3ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_f32mf2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg4ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_f32mf2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg5ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_f32mf2(v0, v1, v2, v3, v4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg6ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
, , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_f32m1(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t *new_vl, size_t vl) { + return vlseg3e32ff_v_f32m1(v0, v1, v2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_f32m1(v0, v1, v2, v3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t *new_vl, size_t vl) { + return vlseg5e32ff_v_f32m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t *new_vl, size_t vl) { + return vlseg6e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t *new_vl, size_t vl) { + return vlseg7e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32ff_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t *new_vl, size_t vl) { + return vlseg8e32ff_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m2(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg3ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_f32m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg4ff.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_f32m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i64 } @llvm.riscv.vlseg2ff.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg2ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m1(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg3ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_f64m1(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg4ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m1(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg5ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 }
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_f64m1(v0, v1, v2, v3, v4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg7e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t *new_vl, size_t vl) { + return vlseg7e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_f64m2(v0, v1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_f64m2(v0, v1, v2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg4ff.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m2(v0, v1, v2, v3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m4(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) {
+  return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { + return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) { + return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) { + return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) { + return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) { + return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
[The hunk continues with the same mechanical update applied to the remaining masked segment-load tests: test_vlseg8e8_v_i8mf8_m; vlseg2e8 through vlseg8e8 over i8mf4, i8mf2, and i8m1; vlseg2e8 through vlseg4e8 over i8m2; vlseg2e8 over i8m4; and the e16 counterparts (vlseg2e16 through vlseg8e16 over i16mf4, i16mf2, and i16m1; vlseg2e16 through vlseg4e16 over i16m2; vlseg2e16 over i16m4). In each test the checked call to the matching @llvm.riscv.vlsegN.mask intrinsic gains the trailing tail-policy operand i64 1, while the C test functions and the extractvalue/store expansion of the returned aggregate follow exactly the pattern shown above.]
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { + return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { + return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { + return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, 
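A minimal usage sketch of the masked segment loads exercised above, assuming only <riscv_vector.h> and the pre-ratification intrinsic names these tests use; the helper name and the zero-filled maskedoff choice are illustrative, not part of the patch. Inactive lanes take their values from the maskedoff operands, and the trailing `i64 1` in the IR checks is the tail-agnostic policy operand.

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative helper: deinterleave {a,b} pairs from `src` under `mask`.
       Lanes where `mask` is clear keep the zero maskedoff values. */
    void deinterleave_pairs_masked(int16_t *out_a, int16_t *out_b,
                                   const int16_t *src, vbool16_t mask,
                                   size_t vl) {
      vint16m1_t zero = vmv_v_x_i16m1(0, vl); /* maskedoff for both fields */
      vint16m1_t a, b;
      vlseg2e16_v_i16m1_m(&a, &b, mask, zero, zero, src, vl);
      vse16_v_i16m1(out_a, a, vl);
      vse16_v_i16m1(out_b, b, vl);
    }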
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) {
+  return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg6.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v5]
+//
+void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) {
+  return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg7.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v6]
+//
+void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) {
+  return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg8.mask.nxv1i32.i64(..., i64 1); fields stored to *v0..*v7]
+//
+void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
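The maskedoff operands also let a masked segment load merge new data into live registers instead of zeros. A sketch under the same assumptions (vle32 and vmsne intrinsic names as used in this era; the nonzero-flags predicate is illustrative):

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative: reload {x,y} pairs only where `flags` is nonzero;
       the other lanes of *x and *y keep their current values. */
    void refresh_pairs(vint32m1_t *x, vint32m1_t *y, const int32_t *src,
                       const int32_t *flags, size_t vl) {
      vint32m1_t f = vle32_v_i32m1(flags, vl);
      vbool32_t m = vmsne_vx_i32m1_b32(f, 0, vl);
      /* Inactive lanes come from the maskedoff operands *x and *y. */
      vlseg2e32_v_i32m1_m(x, y, m, *x, *y, src, vl);
    }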
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
+  return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg6.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v5]
+//
+void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
+  return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg7.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v6]
+//
+void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
+  return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg8.mask.nxv2i32.i64(..., i64 1); fields stored to *v0..*v7]
+//
+void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
+  return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
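An all-ones mask turns the masked builtin into a drop-in for the unmasked form while still satisfying the maskedoff signature. A sketch, assuming vmset_m_b16 is available under this era's naming; with every lane active the maskedoff operands are not observable in the body lanes:

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative: call the masked builtin with an all-ones mask. */
    void load_pairs_all_active(vint32m2_t *a, vint32m2_t *b, vint32m2_t off,
                               const int32_t *src, size_t vl) {
      vbool16_t all = vmset_m_b16(vl); /* every lane active */
      /* `off` fills the maskedoff slots but never shows through here. */
      vlseg2e32_v_i32m2_m(a, b, all, off, off, src, vl);
    }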
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv4i32.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv4i32.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
+  return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv4i32.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
+  return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv8i32.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
+  return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
+  return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
+  return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
+  return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg6.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v5]
+//
+void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
+  return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg7.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v6]
+//
+void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
+  return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg8.mask.nxv1i64.i64(..., i64 1); fields stored to *v0..*v7]
+//
+void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
+  return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv2i64.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv2i64.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) {
+  return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv2i64.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) {
+  return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
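Because inactive lanes can be zero-filled through maskedoff, a reduction over a masked segment load needs no separate select step. A sketch, assuming vredsum_vs_i64m1_i64m1 and vmv_x_s_i64m1_i64 are available under this naming; the {key,value} layout is illustrative:

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative: sum the value field of {key,value} pairs for the
       lanes selected by `mask`; inactive lanes contribute 0. */
    int64_t sum_values_masked(const int64_t *pairs, vbool64_t mask,
                              size_t vl) {
      vint64m1_t zero = vmv_v_x_i64m1(0, vl);
      vint64m1_t k, v;
      vlseg2e64_v_i64m1_m(&k, &v, mask, zero, zero, pairs, vl);
      vint64m1_t red = vredsum_vs_i64m1_i64m1(zero, v, zero, vl);
      return vmv_x_s_i64m1_i64(red);
    }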
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv4i64.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) {
+  return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) {
+  return vlseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg6.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v5]
+//
+void test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) {
+  return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg7.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v6]
+//
+void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) {
+  return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg8.mask.nxv1i8.i64(..., i64 1); fields stored to *v0..*v7]
+//
+void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) {
+  return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
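The same pattern scales to three and four fields; a sketch of an RGB deinterleave at the u8mf8 types used just above (the function name and the zero fill are illustrative, not part of the patch):

    #include <riscv_vector.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative: split packed RGB bytes into planes under `mask`;
       excluded lanes are written back as 0. */
    void split_rgb_masked(uint8_t *r, uint8_t *g, uint8_t *b,
                          const uint8_t *rgb, vbool64_t mask, size_t vl) {
      vuint8mf8_t z = vmv_v_x_u8mf8(0, vl);
      vuint8mf8_t vr, vg, vb;
      vlseg3e8_v_u8mf8_m(&vr, &vg, &vb, mask, z, z, z, rgb, vl);
      vse8_v_u8mf8(r, vr, vl);
      vse8_v_u8mf8(g, vg, vl);
      vse8_v_u8mf8(b, vb, vl);
    }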
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl) {
+  return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg6.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v5]
+//
+void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
+  return vlseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg7.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v6]
+//
+void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
+  return vlseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg8.mask.nxv2i8.i64(..., i64 1); fields stored to *v0..*v7]
+//
+void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
+  return vlseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg2.mask.nxv4i8.i64(..., i64 1); fields stored to *v0..*v1]
+//
+void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg3.mask.nxv4i8.i64(..., i64 1); fields stored to *v0..*v2]
+//
+void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg4.mask.nxv4i8.i64(..., i64 1); fields stored to *v0..*v3]
+//
+void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
+// [CHECK-RV64 body: call @llvm.riscv.vlseg5.mask.nxv4i8.i64(..., i64 1); fields stored to *v0..*v4]
+//
+void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) { + return vlseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) { + return vlseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t 
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
+  return vlseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
+  return vlseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
+  return vlseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
+  return vlseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
+  return vlseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
+  return vlseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) {
+  return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) {
+  return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
+  return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) {
+  return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) {
+  return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) {
+  return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) {
+  return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) {
+  return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg7.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) {
+  return vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) {
+  return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
+  return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) {
+  return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) {
+  return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
+  return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
[[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { + return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { + return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// 
CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { + return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t 
*v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { + return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { + return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { + return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { + return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) {
+ return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) {
+ return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) {
+ return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2,
vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
+ return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
+ return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) {
+ return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]],
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) {
+ return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6,
vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) {
+ return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) {
+ return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) {
+ return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) {
+ return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) {
+ return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) {
+ return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], *
[[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) {
+ return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) {
+ return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue
{ , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) {
+ return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) {
+ return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) {
+ return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) {
+ return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m(
+//
CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) {
+ return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) {
+ return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) {
+ return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) {
+ return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) {
+ return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+//
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) {
+ return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) {
+ return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0,
vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
+ return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
+ return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
+ return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
+ return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+//
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
+ return vlseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) {
+ return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) {
+ return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store
[[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) {
+ return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) {
+ return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] =
extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) {
+ return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) {
+ return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16
*base, size_t vl) {
+ return vlseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) {
+ return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) {
+ return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0,
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) {
+ return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) {
+ return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1,
vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) {
+ return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) {
+ return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) {
+ return vlseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { + return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { + return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { + return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { + return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_f16m2_m 
(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { + return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { + return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) { + return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) { + return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) {
+  return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) {
+  return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg5.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) {
+  return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg6.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
+  return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
+  return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg8.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
+  return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg3.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
+  return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
+  return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg5.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
+  return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg6.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
+  return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
+  return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlseg8.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
+  return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg3.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
+  return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
+  return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
+  return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg3.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg4.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
+  return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg5.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
+  return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg6.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) {
+  return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg7.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) {
+  return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlseg8.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) {
+  return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
+  return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
+  return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// ...
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) { + return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) { + return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + 
return vlseg3e8ff_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, 
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8*
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, 
vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg5e8ff_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg6e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg7e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg8e8ff_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg3e8ff_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg4e8ff_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl) { + return vlseg2e8ff_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void 
test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32ff_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32ff_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32ff_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32ff_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e64ff_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e64ff_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e64ff_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64ff_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64ff_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e8ff_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e8ff_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e8ff_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e8ff_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e8ff_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e16ff_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e16ff_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e16ff_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e16ff_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg4e32ff_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl) { + return vlseg2e32ff_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t 
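// One maskedoff operand per segment output; the masked builtin merges these
// into the lanes the mask leaves inactive.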
maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg3e64ff_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg4e64ff_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg5e64ff_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg6e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl) { + 
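  // The trailing "i64 1" in the CHECK line above is the tail-policy operand
  // this patch adds to the masked vlseg7ff intrinsic call.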
return vlseg7e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg8e64ff_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) { + return vlseg2e64ff_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: 
@test_vlseg3e64ff_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_u64m4_m(v0, v1, mask, maskedoff0,
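  // Masked ff builtin argument order: output pointers, mask, maskedoff
  // operands, base, new_vl, vl.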
maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t 
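// new_vl receives the number of elements actually loaded by the
// fault-only-first load (the trailing i64 member of the intrinsic's result).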
*new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, 
size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } 
@llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg5e16ff_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg6e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg7e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16ff_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg8e16ff_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg2e16ff_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg3e16ff_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16ff_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl) { + return vlseg4e16ff_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_m( +// 
CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e16ff_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vlseg2e16ff_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float*
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 4
 // CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg5e32ff_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg6e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg7e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg8e32ff_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg3e32ff_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg4e32ff_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg5e64ff_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg6e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg7e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg8e64ff_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg3e64ff_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg4e64ff_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
+  return vlseg2e64ff_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
+}
+
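// The tests from here on exercise the new `_mt` segment-load builtins: unlike
// the `_m` forms above, they take a trailing tail-policy argument, which every
// test passes as VE_TAIL_AGNOSTIC (lowered to the trailing `i64 1` operand in
// the checks). A minimal sketch of requesting the tail-undisturbed behavior
// instead (illustrative only; it assumes a matching VE_TAIL_UNDISTURBED
// constant is defined alongside VE_TAIL_AGNOSTIC):
//
//   vlseg2e8_v_i8mf8_mt(&v0, &v1, mask, maskedoff0, maskedoff1, base, vl,
//                       VE_TAIL_UNDISTURBED);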
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e8_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e8_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e8_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e8_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg8e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e8_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg3e8_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg4e8_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg5.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg5e8_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg6.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg6e8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) {
+  return vlseg7e8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_mt(
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t 
maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t 
*v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg5e16_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg6e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg7e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, 
vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg8e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg2e16_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg3e16_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl, uint8_t ta) { + return vlseg4e16_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mt( +// 
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e32_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e32_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e32_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e32_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e32_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e32_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e32_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
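// Sketch: every masked call above ends in `i64 1`, the encoded tail policy.
// Assuming riscv_vector.h defines the two policy macros as 0/1 (an
// assumption, not spelled out in this hunk), the correspondence can be
// pinned down at compile time:
#include <riscv_vector.h>

_Static_assert(VE_TAIL_UNDISTURBED == 0, "undisturbed encodes as i64 0");
_Static_assert(VE_TAIL_AGNOSTIC == 1, "agnostic encodes as i64 1");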
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e32_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e32_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e32_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e32_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
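// Sketch: a strip-mined deinterleave of (a,b) pairs. An all-ones mask plus
// the masked _mt form makes the tail policy explicit on every iteration;
// vsetvl_e32m2, vmset_m_b16, vmv_v_x_i32m2 and vse32_v_i32m2 are the usual
// intrinsics of this era of the API (an assumption of this sketch).
#include <riscv_vector.h>

void split_pairs(int32_t *da, int32_t *db, const int32_t *src, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e32m2(n);
    vbool16_t all = vmset_m_b16(vl);       // no lanes masked off
    vint32m2_t a, b;
    vint32m2_t z = vmv_v_x_i32m2(0, vl);   // dummy maskedoff operands
    vlseg2e32_v_i32m2_mt(&a, &b, all, z, z, src, vl, VE_TAIL_AGNOSTIC);
    vse32_v_i32m2(da, a, vl);
    vse32_v_i32m2(db, b, vl);
    src += 2 * vl; da += vl; db += vl; n -= vl;
  }
}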
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e32_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e64_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e64_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e64_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e64_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e64_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e64_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e64_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
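// Sketch: with the undisturbed policy, lanes past vl keep the previous
// contents of the destinations, so a partially-valid final load does not
// clobber earlier results. VE_TAIL_UNDISTURBED is assumed to come from
// <riscv_vector.h>; the scenario and names are illustrative.
#include <riscv_vector.h>

void refresh_rows(vint64m1_t *k, vint64m1_t *v, vbool64_t dirty,
                  vint64m1_t old_k, vint64m1_t old_v,
                  const int64_t *table, size_t vl) {
  // Only lanes flagged in `dirty` are reloaded; masked-off lanes take
  // old_k/old_v, and the tail stays as it was.
  vlseg2e64_v_i64m1_mt(k, v, dirty, old_k, old_v, table, vl,
                       VE_TAIL_UNDISTURBED);
}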
+// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e64_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e8_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e8_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e8_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e8_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
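// Sketch: a four-field segment load splitting RGBA bytes into per-channel
// registers, mirroring the vlseg4e8_v_u8mf8_mt signature exercised above.
// The same maskedoff value can be reused for every field.
#include <riscv_vector.h>

void split_rgba(vuint8mf8_t *r, vuint8mf8_t *g, vuint8mf8_t *b,
                vuint8mf8_t *a, vbool64_t m, vuint8mf8_t off,
                const uint8_t *px, size_t vl) {
  vlseg4e8_v_u8mf8_mt(r, g, b, a, m, off, off, off, off, px, vl,
                      VE_TAIL_AGNOSTIC);
}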
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e8_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e8_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e8_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e8_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
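// Sketch: feeding the previous results back in as maskedoff chains
// iterations, so masked-off lanes carry the last loaded values forward;
// with the (assumed) VE_TAIL_UNDISTURBED policy the tail lanes do too.
#include <riscv_vector.h>

void reload_inplace(vuint8mf2_t *lo, vuint8mf2_t *hi, vbool16_t m,
                    const uint8_t *src, size_t vl) {
  vlseg2e8_v_u8mf2_mt(lo, hi, m, *lo, *hi, src, vl, VE_TAIL_UNDISTURBED);
}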
extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg2e8_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg3e8_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg4e8_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg5e8_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg6e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg7e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) { + return vlseg8e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t 
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e8_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e8_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e8_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e8_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e8_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e8_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e8_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e16_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e16_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e16_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e16_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e16_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e16_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e16_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e16_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e16_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg8e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e16_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e16_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e16_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg2e32_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg3e32_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg4e32_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg5e32_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg6e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vlseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) {
+ return vlseg7e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3,
maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg8e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg3e32_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg4e32_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg5e32_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg6e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg7e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg8e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t 
mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg3e32_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg4e32_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: 
ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, size_t vl) { - return vlseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg2e64_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, size_t vl) { - return vlseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg3e64_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, size_t vl) { - return vlseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg4e64_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, size_t vl) { - return vlseg2e32_v_f32m1(v0, v1, base, vl); +void test_vlseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg5e64_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, size_t vl) { - return vlseg3e32_v_f32m1(v0, v1, v2, base, vl); +void test_vlseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg6e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1( +void test_vlseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t 
maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg7e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, size_t vl) { - return vlseg4e32_v_f32m1(v0, v1, v2, v3, base, vl); +void test_vlseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg8e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1( +void test_vlseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg2e64_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, size_t vl) { - return vlseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg3e64_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1( -// CHECK-RV32-NEXT: entry: -// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1( +void test_vlseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl, uint8_t ta) { + return vlseg4e64_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, size_t vl) {
-  return vlseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl, uint8_t ta) {
+  return vlseg2e64_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1(
+void test_vlseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, size_t vl) {
-  return vlseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg3e16_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, size_t vl) {
-  return vlseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg4e16_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, size_t vl) {
-  return vlseg2e32_v_f32m2(v0, v1, base, vl);
+void test_vlseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg5e16_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, size_t vl) {
-  return vlseg3e32_v_f32m2(v0, v1, v2, base, vl);
+void test_vlseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg6e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, size_t vl) {
-  return vlseg4e32_v_f32m2(v0, v1, v2, v3, base, vl);
+void test_vlseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg7e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i32(float* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, size_t vl) {
-  return vlseg2e32_v_f32m4(v0, v1, base, vl);
+void test_vlseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg8e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m1(v0, v1, base, vl);
+void test_vlseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]],
[[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, size_t vl) {
-  return vlseg3e64_v_f64m1(v0, v1, v2, base, vl);
+void test_vlseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg3e16_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, size_t vl) {
-  return vlseg4e64_v_f64m1(v0, v1, v2, v3, base, vl);
+void test_vlseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg4e16_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, size_t vl) {
-  return vlseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, vl);
+void test_vlseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg5e16_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, size_t vl) {
-  return vlseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, vl);
+void test_vlseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg6e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, size_t vl) {
-  return vlseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, vl);
+void test_vlseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg7e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, size_t vl) {
-  return vlseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl);
+void test_vlseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg8e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m2(v0, v1, base, vl);
+void test_vlseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, size_t vl) {
-  return vlseg3e64_v_f64m2(v0, v1, v2, base, vl);
+void test_vlseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t
maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg3e16_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2( +// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, size_t vl) { - return vlseg4e64_v_f64m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg4e16_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i32(double* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// 
CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4( +// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, size_t vl) { - return vlseg2e64_v_f64m4(v0, v1, base, vl); +void test_vlseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg5e16_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg6e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl, uint8_t ta) { + return vlseg7e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t vl) {
-  return vlseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg8e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg2.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t vl) {
-  return vlseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg3.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t vl) {
-  return vlseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg3e16_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg7.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vlseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg4e16_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t vl) {
-  return vlseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl, uint8_t ta) {
+  return vlseg2e16_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf8_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg8.mask.nxv1i8.i32(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV32-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t vl) {
-  return vlseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl, uint8_t ta) {
+  return vlseg2e32_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg2.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl, uint8_t ta) {
+  return vlseg3e32_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlseg3.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl, uint8_t ta) { + return vlseg4e32_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl, uint8_t ta) { + return vlseg5e32_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl, uint8_t ta) { + return vlseg6e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, 
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl, uint8_t ta) { + return vlseg7e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl, uint8_t ta) { + return vlseg8e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg2e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl, uint8_t ta) { + return vlseg3e32_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl, uint8_t ta) { + return vlseg4e32_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8mf2_m( +void test_vlseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl, uint8_t ta) { + return vlseg5e32_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl, uint8_t ta) { + return vlseg6e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl, uint8_t ta) { + return vlseg7e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl, uint8_t ta) { + return vlseg8e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl, uint8_t ta) { + return vlseg3e32_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl, uint8_t ta) { + return vlseg4e32_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t vl) { - return 
vlseg3e8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl, uint8_t ta) { + return vlseg2e32_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t vl) { - return vlseg4e8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl, uint8_t ta) { + return vlseg2e64_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t vl) { - return vlseg5e8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl, uint8_t ta) { + return vlseg3e64_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t vl) { - return vlseg6e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl, uint8_t ta) { + return vlseg4e64_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, 
vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t vl) { - return vlseg7e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl, uint8_t ta) { + return vlseg5e64_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_i8m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t vl) { - return vlseg8e8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl, uint8_t ta) { + return vlseg6e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
} [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t vl) { - return vlseg2e8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl, uint8_t ta) { + return vlseg7e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t vl) { - return vlseg3e8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl, uint8_t ta) { + return vlseg8e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_i8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg2.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t vl) {
-  return vlseg4e8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl, uint8_t ta) {
+  return vlseg2e64_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_i8m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t vl) {
-  return vlseg2e8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl, uint8_t ta) {
+  return vlseg3e64_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t vl) {
-  return vlseg2e16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl, uint8_t ta) {
+  return vlseg4e64_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg3.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL:
@test_vlseg3e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl, uint8_t ta) { + return vlseg2e64_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg2e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg3e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg4e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg5e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 
[[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] 
= extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg6e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 
} [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg7e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue 
{ , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlseg8e8ff_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t vl) { - return vlseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16mf2_m( +// CHECK-RV64-LABEL: 
@test_vlseg4e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg4e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// 
CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg5e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg6e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg7e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_i8mf4_mt(v0, v1, 
v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg8e8ff_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { 
+  return vlseg8e8ff_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t vl) {
-  return vlseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg2e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg4.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { , , 
, } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_i16m1_m( +// CHECK-RV64-LABEL: 
@test_vlseg4e8ff_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t vl) { - return vlseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg4e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t vl) { - return vlseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg5e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, 
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_i16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg7.mask.nxv4i16.i32(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]],
align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t vl) { - return vlseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg6e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_i16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t vl) { - return vlseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg7e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg8e8ff_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e8ff_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
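The ff tests also thread through new_vl, which receives the element count actually loaded when a fault-only-first access stops early. A hedged sketch of how a caller might consume it (the wrapper name is illustrative; the intrinsic signature is taken from the tests below):

#include <riscv_vector.h>
#include <stddef.h>

// Sketch: one masked fault-only-first two-segment load. If the access
// faults past element 0, the load is cut short and new_vl reports how
// many elements were actually loaded (new_vl <= vl).
size_t load_pairs_ff(vint8m1_t *f0, vint8m1_t *f1, vbool8_t mask,
                     vint8m1_t off0, vint8m1_t off1,
                     const int8_t *base, size_t vl) {
  size_t new_vl;
  vlseg2e8ff_v_i8m1_mt(f0, f1, mask, off0, off1, base, &new_vl, vl,
                       VE_TAIL_AGNOSTIC);
  return new_vl; // caller can resume at base + 2 * new_vl
}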
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg2.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t vl) {
-  return vlseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_i16m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i32(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t vl) {
-  return vlseg3e16_v_i16m2_m(v0, v1, v2,
mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_i16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t vl) { - return vlseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t 
maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_i16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t vl) { - return vlseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg5e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg6e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t vl) { - return vlseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg7e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg8e8ff_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff_v_i8m1_mt(v0, v1, v2, v3, v4, 
v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t vl) { - return vlseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg2e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: 
@test_vlseg6e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t vl) { - return vlseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg3e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t vl) { - return vlseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg4e8ff_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t vl) { - return vlseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg2e8ff_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t vl) { - return vlseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t vl) { - 
return vlseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t vl) { - return vlseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t 
+  return vlseg4e16ff_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg5e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, size_t vl) {
-  return vlseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg6e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, size_t vl) {
-  return vlseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg6e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg7e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, size_t vl) {
-  return vlseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg7e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg8e32_v_i32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, size_t vl) {
-  return vlseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e16ff_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t vl) {
-  return vlseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg4e32_v_i32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t vl) {
-  return vlseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_i32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t vl) {
-  return vlseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg5e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t vl) {
-  return vlseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg6e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e16ff_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t vl) {
-  return vlseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg7e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e16ff_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t vl) {
-  return vlseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg8e16ff_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e16ff_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg5e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, size_t vl) {
-  return vlseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg2e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg6e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, size_t vl) {
-  return vlseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg3e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e16ff_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg7e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, size_t vl) {
-  return vlseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg4e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e16ff_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg8e64_v_i64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, size_t vl) {
-  return vlseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg5e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e16ff_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
@llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg6e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t vl) { - return vlseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg7e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_i64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t vl) { - return vlseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg8e16ff_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, 
vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_i64m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t vl) { - return vlseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg3e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e16ff_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, 
vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg2e16ff_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf8_m(v0, v1, 
v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg2e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t vl) { - return vlseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg3e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t vl) { - return vlseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg4e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf8_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t vl) { - return vlseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg5e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg6e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg7e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t vl) { - return vlseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
vl); +void test_vlseg8e32ff_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t 
maskedoff4, const uint8_t *base, size_t vl) { - return vlseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg2e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 + +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e8_v_u8mf4_m (vuint8mf4_t *v0, ..., vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, ..., vuint8mf4_t maskedoff5, const uint8_t *base, size_t vl) {
-  return vlseg6e8_v_u8mf4_m(v0, ..., v5, mask, maskedoff0, ..., maskedoff5, base, vl);
+void test_vlseg3e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e32ff_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, ..., i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e8_v_u8mf4_m (vuint8mf4_t *v0, ..., vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, ..., vuint8mf4_t maskedoff6, const uint8_t *base, size_t vl) {
-  return vlseg7e8_v_u8mf4_m(v0, ..., v6, mask, maskedoff0, ..., maskedoff6, base, vl);
+void test_vlseg4e32ff_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e32ff_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv2i8.i32(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, ..., i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg8e8_v_u8mf4_m (vuint8mf4_t *v0, ..., vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, ..., vuint8mf4_t maskedoff7, const uint8_t *base, size_t vl) {
-  return vlseg8e8_v_u8mf4_m(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, vl);
+void test_vlseg5e32ff_v_i32m1_mt (vint32m1_t *v0, ..., vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, ..., vint32m1_t maskedoff4, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e32ff_v_i32m1_mt(v0, ..., v4, mask, maskedoff0, ..., maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg2.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlseg2.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, ..., i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg6e32ff_v_i32m1_mt (vint32m1_t *v0, ..., vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, ..., vint32m1_t maskedoff5, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e32ff_v_i32m1_mt(v0, ..., v5, mask, maskedoff0, ..., maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
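These `_mt` hunks all follow one pattern: the masked fault-only-first segment load keeps its mask and maskedoff operands, writes each loaded field group through v0..vN, reports the element count actually loaded through new_vl, and gains a trailing tail-policy argument. As a reading aid, a minimal usage sketch in plain C; it is illustrative only, not part of the patch, assumes a riscv_vector.h generated from this change, and the wrapper name is hypothetical:

  // Illustrative only; assumes the patched riscv_vector.h, which declares the
  // _mt builtins and the VE_TAIL_AGNOSTIC policy macro.
  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  // Masked fault-only-first load of 3-field segments; tail elements are left
  // agnostic, and *new_vl receives the number of elements actually loaded.
  void load3(vint32m1_t *f0, vint32m1_t *f1, vint32m1_t *f2,
             vbool32_t mask, vint32m1_t off0, vint32m1_t off1, vint32m1_t off2,
             const int32_t *base, size_t *new_vl, size_t vl) {
    vlseg3e32ff_v_i32m1_mt(f0, f1, f2, mask, off0, off1, off2,
                           base, new_vl, vl, VE_TAIL_AGNOSTIC);
  }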
-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg3.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg3.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, ..., i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg7e32ff_v_i32m1_mt (vint32m1_t *v0, ..., vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, ..., vint32m1_t maskedoff6, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e32ff_v_i32m1_mt(v0, ..., v6, mask, maskedoff0, ..., maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg4.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, ..., i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg8e32ff_v_i32m1_mt (vint32m1_t *v0, ..., vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, ..., vint32m1_t maskedoff7, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e32ff_v_i32m1_mt(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
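Because these are fault-only-first loads, a caller must advance by *new_vl, not by the requested vl. A hedged sketch of that consumption idiom, under the same patched-header assumption; vsetvl_e32m1 and vse32_v_i32m1 are pre-existing intrinsics from the same header, and the function name is hypothetical:

  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  // Copies n 3-field segments from src into three output arrays. A
  // fault-only-first load may stop early (*new_vl < vl) if a later element
  // would fault, so the loop steps by new_vl. Element 0 still traps on fault,
  // so new_vl is at least 1 and the loop makes progress.
  void copy_segments(int32_t *d0, int32_t *d1, int32_t *d2,
                     const int32_t *src, size_t n, vbool32_t mask,
                     vint32m1_t off0, vint32m1_t off1, vint32m1_t off2) {
    while (n > 0) {
      size_t vl = vsetvl_e32m1(n);
      vint32m1_t f0, f1, f2;
      size_t new_vl;
      vlseg3e32ff_v_i32m1_mt(&f0, &f1, &f2, mask, off0, off1, off2,
                             src, &new_vl, vl, VE_TAIL_AGNOSTIC);
      vse32_v_i32m1(d0, f0, new_vl);
      vse32_v_i32m1(d1, f1, new_vl);
      vse32_v_i32m1(d2, f2, new_vl);
      src += 3 * new_vl;
      d0 += new_vl; d1 += new_vl; d2 += new_vl;
      n -= new_vl;
    }
  }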
-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg5.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg5.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e8_v_u8mf2_m (vuint8mf2_t *v0, ..., vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, ..., vuint8mf2_t maskedoff4, const uint8_t *base, size_t vl) {
-  return vlseg5e8_v_u8mf2_m(v0, ..., v4, mask, maskedoff0, ..., maskedoff4, base, vl);
+void test_vlseg2e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e32ff_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg6.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg6.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, ..., i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e8_v_u8mf2_m (vuint8mf2_t *v0, ..., vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, ..., vuint8mf2_t maskedoff5, const uint8_t *base, size_t vl) {
-  return vlseg6e8_v_u8mf2_m(v0, ..., v5, mask, maskedoff0, ..., maskedoff5, base, vl);
+void test_vlseg3e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e32ff_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, ..., i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], ..., i32* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e8_v_u8mf2_m (vuint8mf2_t *v0, ..., vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, ..., vuint8mf2_t maskedoff6, const uint8_t *base, size_t vl) {
-  return vlseg7e8_v_u8mf2_m(v0, ..., v6, mask, maskedoff0, ..., maskedoff6, base, vl);
+void test_vlseg4e32ff_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e32ff_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_i32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 4)
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg8e8_v_u8mf2_m (vuint8mf2_t *v0, ..., vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, ..., vuint8mf2_t maskedoff7, const uint8_t *base, size_t vl) {
-  return vlseg8e8_v_u8mf2_m(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, vl);
+void test_vlseg2e32ff_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e32ff_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
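The hunks from here on switch from the e32 to the e64 variants: the element and mask types change (i64m1 pairs with vbool64_t) and the stores in the checks use align 8, but the argument order and the trailing tail-policy operand are identical. A minimal sketch mirroring test_vlseg2e64ff_v_i64m1_mt below, illustrative only and assuming the patched header (the wrapper name is hypothetical):

  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  // Per the checks below, this call is expected to lower to
  // @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(..., i64 1).
  void load2_e64(vint64m1_t *f0, vint64m1_t *f1, vbool64_t mask,
                 vint64m1_t off0, vint64m1_t off1, const int64_t *base,
                 size_t *new_vl, size_t vl) {
    vlseg2e64ff_v_i64m1_mt(f0, f1, mask, off0, off1, base, new_vl, vl,
                           VE_TAIL_AGNOSTIC);
  }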
-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlseg2.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg3.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg3.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V2]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t vl) {
-  return vlseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e64ff_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg4.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg4.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V3]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64ff_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e64ff_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg5.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg5.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V4]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e8_v_u8m1_m (vuint8m1_t *v0, ..., vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, ..., vuint8m1_t maskedoff4, const uint8_t *base, size_t vl) {
-  return vlseg5e8_v_u8m1_m(v0, ..., v4, mask, maskedoff0, ..., maskedoff4, base, vl);
+void test_vlseg5e64ff_v_i64m1_mt (vint64m1_t *v0, ..., vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff4, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg5e64ff_v_i64m1_mt(v0, ..., v4, mask, maskedoff0, ..., maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
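Only VE_TAIL_AGNOSTIC is exercised by these tests, lowered as the trailing "i64 1" in the checks. A tail-undisturbed call would presumably lower to "i64 0"; the macro name VE_TAIL_UNDISTURBED in this sketch is an assumption about the patched header, not something the hunks confirm:

  #include <riscv_vector.h>
  #include <stddef.h>
  #include <stdint.h>

  // With a tail-undisturbed policy, elements past vl keep the values already
  // in the destination registers rather than becoming unspecified.
  void load2_keep_tail(vint32m4_t *f0, vint32m4_t *f1, vbool8_t mask,
                       vint32m4_t off0, vint32m4_t off1, const int32_t *base,
                       size_t *new_vl, size_t vl) {
    vlseg2e32ff_v_i32m4_mt(f0, f1, mask, off0, off1, base, new_vl, vl,
                           VE_TAIL_UNDISTURBED); // assumed macro, value 0
  }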
-// CHECK-RV32-LABEL: @test_vlseg6e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg6.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V5]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e8_v_u8m1_m (vuint8m1_t *v0, ..., vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, ..., vuint8m1_t maskedoff5, const uint8_t *base, size_t vl) {
-  return vlseg6e8_v_u8m1_m(v0, ..., v5, mask, maskedoff0, ..., maskedoff5, base, vl);
+void test_vlseg6e64ff_v_i64m1_mt (vint64m1_t *v0, ..., vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff5, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e64ff_v_i64m1_mt(v0, ..., v5, mask, maskedoff0, ..., maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V6]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e8_v_u8m1_m (vuint8m1_t *v0, ..., vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, ..., vuint8m1_t maskedoff6, const uint8_t *base, size_t vl) {
-  return vlseg7e8_v_u8m1_m(v0, ..., v6, mask, maskedoff0, ..., maskedoff6, base, vl);
+void test_vlseg7e64ff_v_i64m1_mt (vint64m1_t *v0, ..., vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff6, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e64ff_v_i64m1_mt(v0, ..., v6, mask, maskedoff0, ..., maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e8_v_u8m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, ... } @llvm.riscv.vlseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], ..., i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, ..., i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], ..., i64* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ... (extract/store to [[V0]]..[[V7]], align 8)
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg8e8_v_u8m1_m (vuint8m1_t *v0, ..., vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, ..., vuint8m1_t maskedoff7, const uint8_t *base, size_t vl) {
-  return vlseg8e8_v_u8m1_m(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, vl);
+void test_vlseg8e64ff_v_i64m1_mt (vint64m1_t *v0, ..., vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, ..., vint64m1_t maskedoff7, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e64ff_v_i64m1_mt(v0, ..., v7, mask, maskedoff0, ..., maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlseg2.mask.nxv16i8.i32(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    ... (extract/store to [[V0]]..[[V1]], align 1)
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
@llvm.riscv.vlseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t vl) { - return vlseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t vl) { - return vlseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e8_v_u8m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t 
*v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t vl) {
-  return vlseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64ff_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e64ff_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e8_v_u8m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i32(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_i64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t vl) {
-  return vlseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlseg2.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf4_m(
+// CHECK-RV64-LABEL:
@test_vlseg2e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 
[[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t vl) { - return 
vlseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e8ff_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] 
= extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16mf2_m(
+void test_vlseg2e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t vl) {
-  return vlseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg3e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg3.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-//
CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg5e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg5.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t vl) {
-  return vlseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg6e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg6e8ff_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg6.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg6.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t vl) {
-  return vlseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg7e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg7e8ff_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg7.mask.nxv2i16.i32(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV32-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlseg7.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT:    store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t vl) {
-  return vlseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg8e8ff_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg8e8ff_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, 
vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg2e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg3e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg5e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// 
CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t vl) { - return vlseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg6e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t vl) { - return vlseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg7e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t vl) { - return vlseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg8e8ff_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV32-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlseg8.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 + +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // 
CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t vl) { - return vlseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg2e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e8ff_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg3e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e8ff_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i32( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t vl) { - return vlseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_u16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t vl) { - return vlseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg5e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e8ff_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_u16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg2e16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t vl) { - return vlseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg6e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e8ff_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg7e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e8ff_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg8e8ff_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e8ff_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: 
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t vl) {
-  return vlseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg2e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e8ff_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg5.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t vl) {
-  return vlseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg3e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e8ff_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32mf2_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlseg6.mask.nxv1i32.i32(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV32-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL:
@test_vlseg6e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg4e8ff_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e8ff_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg2e8ff_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return 
vlseg2e8ff_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg2e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg3e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_u16mf4_mt(v0, v1, v2, 
mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg5e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e32_v_u32m1_m( +void test_vlseg6e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t vl) { - return vlseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg7e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// 
CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t vl) { - return vlseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg8e16ff_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t vl) { - return vlseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg2e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e32_v_u32m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t vl) { - return vlseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg3e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg4e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t vl) { - return vlseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg5e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_u32m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t vl) { - return vlseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg6e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e32_v_u32m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t vl) { - return vlseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg7e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
, , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t vl) { - return vlseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg8e16ff_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t vl) { - return vlseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t vl) { - return vlseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t vl) { - return vlseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg4e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t vl) { - return vlseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg5e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t vl) { - return vlseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg6e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t vl) { - return vlseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg7e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg8e16ff_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg8e16ff_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_u64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t vl) {
- return vlseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg2e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e16ff_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_u64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i32(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t vl) {
- return vlseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg3e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg3e16ff_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_u64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i32(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t vl) {
- return vlseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg4e16ff_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg4e16ff_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e16ff_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e16ff_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i32(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t vl) { - return vlseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t vl) { - return vlseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t vl) { - return vlseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg4e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg5e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg5e32ff_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg6e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg6e32ff_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32mf2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg7e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg7e32ff_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg8e32ff_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg8e32ff_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg2e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e32ff_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg3e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg3e32ff_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
 //
-// CHECK-RV64-LABEL: @test_vlseg5e32_v_f32m1_m(
+void test_vlseg4e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg4e32ff_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t vl) {
- return vlseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg5e32ff_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t vl) {
- return vlseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl);
+void test_vlseg6e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg6e32ff_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg7e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg7e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t vl) {
- return vlseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl);
+void test_vlseg7e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg7e32ff_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg8e32_v_f32m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg8e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8
+// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t vl) {
- return vlseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl);
+void test_vlseg8e32ff_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg8e32ff_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e32ff_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t vl) {
- return vlseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg3e32ff_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e32_v_f32m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t vl) {
- return vlseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e32ff_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg4e32ff_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e32_v_f32m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t vl) {
- return vlseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32ff_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e32ff_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t vl) {
- return vlseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e64ff_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t vl) {
- return vlseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg3e64ff_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t vl) {
- return vlseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg4e64ff_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg5e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg5e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5
+// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t vl) {
- return vlseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl);
+void test_vlseg5e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg5e64ff_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV32-LABEL: @test_vlseg6e64_v_f64m1_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg6e64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue {
, , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t vl) { - return vlseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e64ff_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t vl) { - return vlseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e64ff_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e64_v_f64m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t vl) { - return vlseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e64ff_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e64ff_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 
} [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
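For reference while reading the generated checks: the _mt builtins take one extra trailing argument that selects the tail policy, and the tail-agnostic choice is what lowers to the trailing i64 1 operand on the vlseg*ff.mask calls above. A minimal sketch of calling the builtin exercised in the preceding hunk (the wrapper and its parameter names are illustrative, not part of this patch; VE_TAIL_UNDISTURBED is assumed here as the companion policy value to VE_TAIL_AGNOSTIC):

#include <riscv_vector.h>

void demo_vlseg2e64ff_mt(vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask,
                         vuint64m2_t maskedoff0, vuint64m2_t maskedoff1,
                         const uint64_t *base, size_t vl) {
  size_t new_vl; // receives the element count loaded before any fault
  // Masked fault-only-first segment load; the final argument is the new
  // tail-policy operand. Passing VE_TAIL_UNDISTURBED instead would keep the
  // destination's tail elements rather than leaving them unspecified.
  vlseg2e64ff_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base,
                         &new_vl, vl, VE_TAIL_AGNOSTIC);
}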
-// CHECK-RV32-LABEL: @test_vlseg3e64_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg3.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t vl) {
-  return vlseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg3e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg3e64ff_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg4e64_v_f64m2_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i32(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV32-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg4e64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vlseg4.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t vl) {
-  return vlseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl);
+void test_vlseg4e64ff_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg4e64ff_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
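The new_vl output is what distinguishes the ff variants, and it composes naturally with the new tail-policy argument. A sketch of a typical fault-only-first loop around the builtin above; this driver is hypothetical and not part of the patch (vsetvl_e64m2 and vse64_v_u64m2_m are existing intrinsics, and the loop assumes a fault on element 0 traps rather than returning new_vl == 0):

#include <riscv_vector.h>

// Hypothetical driver: copy n two-field segments, letting the ff load stop
// early at a faulting page and resuming from the reported new_vl.
void copy_segments(const uint64_t *src, uint64_t *d0, uint64_t *d1, size_t n,
                   vbool32_t mask, vuint64m2_t maskedoff0,
                   vuint64m2_t maskedoff1) {
  for (size_t done = 0; done < n;) {
    size_t vl = vsetvl_e64m2(n - done);
    size_t new_vl;
    vuint64m2_t v0, v1;
    vlseg2e64ff_v_u64m2_mt(&v0, &v1, mask, maskedoff0, maskedoff1,
                           src + 2 * done, &new_vl, vl, VE_TAIL_AGNOSTIC);
    vse64_v_u64m2_m(mask, d0 + done, v0, new_vl); // store only what loaded
    vse64_v_u64m2_m(mask, d1 + done, v1, new_vl);
    done += new_vl;
  }
}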
-// CHECK-RV32-LABEL: @test_vlseg2e64_v_f64m4_m(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i32(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vlseg2.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t vl) {
-  return vlseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4(
-// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT: ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, i64 } [[TMP0]], 1
 //
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf4(v0, v1, base, vl); +void test_vlseg2e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4(v0, v1, v2, base, vl); +void test_vlseg3e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4(v0, v1, v2, v3, base, vl); +void test_vlseg4e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16ff_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2(v0, v1, base, vl); +void test_vlseg2e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret 
void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2(v0, v1, v2, base, vl); +void test_vlseg3e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv2f16.i64(half* [[BASE:%.*]], 
i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16ff_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store 
[[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, size_t vl) {
-  return vlseg2e16_v_f16m1(v0, v1, base, vl);
+void test_vlseg2e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 2
 // CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t
*v1, vfloat16m1_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m1(v0, v1, v2, base, vl); +void test_vlseg3e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1(v0, v1, v2, v3, base, vl); +void test_vlseg4e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, vl); +void test_vlseg5e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e16ff_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 
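
As a usage reference for the masked `_mt` builtins these tests exercise: a minimal sketch of calling the fault-only-first pairwise segment load, assuming the `VE_TAIL_*` policy macros and `_mt` builtin declarations this patch adds; the wrapper name `load_pair` and the two-field layout are illustrative only, not part of the patch.

#include <riscv_vector.h>
#include <stddef.h>

// Masked fault-only-first load of two interleaved f16 fields. Inactive
// elements come from maskedoff0/maskedoff1; the trailing argument selects
// the tail policy, here tail agnostic exactly as in the tests above.
void load_pair(vfloat16m1_t *re, vfloat16m1_t *im, vbool16_t mask,
               vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1,
               const _Float16 *base, size_t avl) {
  size_t new_vl; // receives the element count actually loaded before a trap
  vlseg2e16ff_v_f16m1_mt(re, im, mask, maskedoff0, maskedoff1,
                         base, &new_vl, avl, VE_TAIL_AGNOSTIC);
}
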
-// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlseg6e16ff_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, vl); +void test_vlseg6e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e16ff_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlseg7e16ff_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, vl); +void test_vlseg7e16ff_v_f16m1_mt (vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e16ff_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlseg8e16ff_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, vl); +void test_vlseg8e16ff_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e16ff_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2(v0, v1, base, vl); +void test_vlseg2e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, 
vfloat16m2_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e16ff_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2(v0, v1, v2, base, vl); +void test_vlseg3e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e16ff_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.nxv8f16.i64(half* [[BASE:%.*]], 
i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2(v0, v1, v2, v3, base, vl); +void test_vlseg4e16ff_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e16ff_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i32(half* [[BASE:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4( +// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m4(v0, v1, base, vl); +void test_vlseg2e16ff_v_f16m4_mt (vfloat16m4_t *v0, 
vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e16ff_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg2.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, size_t vl) {
-  return vlseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e32ff_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf4_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.mask.nxv1f16.i32(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlseg3.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg3e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: 
store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg4e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg5e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: 
@test_vlseg6e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg6e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: 
@test_vlseg7e16_v_f16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg7e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv1f16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg8e32ff_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// 
CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16mf2_m( +void test_vlseg3e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg4e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, 
vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16mf2_m( +void test_vlseg5e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e32ff_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg6e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e32ff_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 4 +// CHECK-RV64-NEXT: ret void // -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16mf2_m( +void test_vlseg7e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg7e32ff_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg8e32ff_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e32ff_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , , , } @llvm.riscv.vlseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg2e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg3e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e32ff_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16mf2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg4e32ff_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e32ff_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg2e32ff_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e32ff_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, 
VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, size_t vl) {
-  return vlseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl);
+void test_vlseg2e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+  return vlseg2e64ff_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m1_m(
-// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlseg4.mask.nxv4f16.i32(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
-// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV32-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV32-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV32-NEXT:    ret void
-//
-// CHECK-RV64-LABEL:
@test_vlseg4e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg5e16_v_f16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlseg5.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg5e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, } @llvm.riscv.vlseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, size_t vl) { - return vlseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, vl); +void test_vlseg4e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg4e64ff_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg6e16_v_f16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg6e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store i64 [[TMP6]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, size_t vl) { - return vlseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, vl); +void test_vlseg5e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg5e64ff_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, new_vl, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg7e16_v_f16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg7e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store i64 [[TMP7]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, size_t vl) { - return vlseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, vl); +void test_vlseg6e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg6e64ff_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg8e16_v_f16m1_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV32-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV32-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV32-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV32-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg8e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store i64 [[TMP8]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, size_t vl) { - return vlseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, vl); +void test_vlseg7e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, size_t *new_vl, 
size_t vl, uint8_t ta) { + return vlseg7e64ff_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP9:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 8 +// CHECK-RV64-NEXT: store i64 [[TMP9]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, size_t vl) { - return vlseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl); +void test_vlseg8e64ff_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t 
maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg8e64ff_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg3e16_v_f16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg3e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, size_t vl) { - return vlseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, vl); +void test_vlseg2e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg2e64ff_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg4e16_v_f16m2_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV32-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV32-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV32-NEXT: ret void -// -// CHECK-RV64-LABEL: @test_vlseg4e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , i64 } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , i64 } [[TMP0]], 3 +// CHECK-RV64-NEXT: store i64 [[TMP4]], i64* [[NEW_VL:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, size_t vl) { - return vlseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, vl); +void test_vlseg3e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, size_t *new_vl, size_t vl, uint8_t ta) { + return vlseg3e64ff_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, new_vl, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV32-LABEL: @test_vlseg2e16_v_f16m4_m( -// CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlseg2.mask.nxv16f16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) -// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV32-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV32-NEXT: ret void +// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i64 } [[TMP0]], 4
+// CHECK-RV64-NEXT: store i64 [[TMP5]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT: ret void
//
-// CHECK-RV64-LABEL: @test_vlseg2e16_v_f16m4_m(
+void test_vlseg4e64ff_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg4e64ff_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, new_vl, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vlseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT: store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vlseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, size_t vl) {
- return vlseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, vl);
+void test_vlseg2e64ff_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl, uint8_t ta) {
+ return vlseg2e64ff_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl, VE_TAIL_AGNOSTIC);
}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
index c279ea8..6992dcd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
@@ -7293,7 +7293,7 @@ void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double
// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf8_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } 
@llvm.riscv.vlseg2ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7304,7 +7304,7 @@ void test_vlseg2e64ff_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7319,7 +7319,7 @@ void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7332,7 +7332,7 @@ void test_vlseg2e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7349,7 +7349,7 @@ void test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7364,7 +7364,7 @@ void 
test_vlseg3e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7383,7 +7383,7 @@ void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7400,7 +7400,7 @@ void test_vlseg4e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7421,7 +7421,7 @@ void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7440,7 +7440,7 
@@ void test_vlseg5e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7463,7 +7463,7 @@ void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -7484,7 +7484,7 @@ void test_vlseg6e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -7509,7 +7509,7 @@ void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -7532,7 +7532,7 @@ void test_vlseg7e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7559,7 +7559,7 @@ void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -7570,7 +7570,7 @@ void test_vlseg8e8ff_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -7585,7 +7585,7 @@ void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , 
i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -7598,7 +7598,7 @@ void test_vlseg2e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -7615,7 +7615,7 @@ void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -7630,7 +7630,7 @@ void test_vlseg3e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -7649,7 +7649,7 @@ void test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -7666,7 +7666,7 @@ void 
test_vlseg4e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -7687,7 +7687,7 @@ void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -7706,7 +7706,7 @@ void test_vlseg5e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -7729,7 +7729,7 @@ void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { 
, , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -7750,7 +7750,7 @@ void test_vlseg6e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -7775,7 +7775,7 @@ void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -7798,7 +7798,7 @@ void test_vlseg7e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -7825,7 +7825,7 @@ void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* 
[[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -7836,7 +7836,7 @@ void test_vlseg8e8ff_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -7851,7 +7851,7 @@ void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,

// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -7864,7 +7864,7 @@ void test_vlseg2e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask,
//
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -7881,7 +7881,7 @@ void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -7896,7 +7896,7 @@ void test_vlseg3e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -7915,7 +7915,7 @@ void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -7932,7 +7932,7 @@ void test_vlseg4e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -7953,7 +7953,7 @@ void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -7972,7 +7972,7 @@ void test_vlseg5e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -7995,7 +7995,7 @@ void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -8016,7 +8016,7 @@ void test_vlseg6e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -8041,7 +8041,7 @@ void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i32 } [[TMP0]], 1
@@ -8064,7 +8064,7 @@ void test_vlseg7e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i64 } [[TMP0]], 1
@@ -8091,7 +8091,7 @@ void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,

// CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
@@ -8102,7 +8102,7 @@ void test_vlseg8e8ff_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2,
//
// CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -8117,7 +8117,7 @@ void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint

// CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
@@ -8130,7 +8130,7 @@ void test_vlseg2e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint
//
// CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -8147,7 +8147,7 @@ void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo

// CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
@@ -8162,7 +8162,7 @@ void test_vlseg3e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vboo
//
// CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -8181,7 +8181,7 @@ void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint

// CHECK-RV32-LABEL: @test_vlseg5e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
@@ -8198,7 +8198,7 @@ void test_vlseg4e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
//
// CHECK-RV64-LABEL: @test_vlseg5e8ff_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -8219,7 +8219,7 @@ void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint

// CHECK-RV32-LABEL: @test_vlseg6e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } [[TMP0]], 1
@@ -8238,7 +8238,7 @@ void test_vlseg5e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint
//
// CHECK-RV64-LABEL: @test_vlseg6e8ff_v_i8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64 } [[TMP0]], 1
@@ -8261,7 +8261,7 @@ void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint

// CHECK-RV32-LABEL: @test_vlseg7e8ff_v_i8m1_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]],
i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -8282,7 +8282,7 @@ void test_vlseg6e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -8307,7 +8307,7 @@ void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -8330,7 +8330,7 @@ void test_vlseg7e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -8357,7 +8357,7 @@ void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8368,7 +8368,7 @@ void test_vlseg8e8ff_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -8383,7 +8383,7 @@ void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -8396,7 +8396,7 @@ void test_vlseg2e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -8413,7 +8413,7 @@ void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_i8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , 
, , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -8428,7 +8428,7 @@ void test_vlseg3e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vboo // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -8447,7 +8447,7 @@ void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_i8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8458,7 +8458,7 @@ void test_vlseg4e8ff_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -8473,7 +8473,7 @@ void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8484,7 +8484,7 @@ void test_vlseg2e8ff_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -8499,7 +8499,7 @@ void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -8512,7 +8512,7 @@ void test_vlseg2e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t ma // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -8529,7 +8529,7 @@ void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -8544,7 +8544,7 @@ void test_vlseg3e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -8563,7 +8563,7 @@ void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -8580,7 +8580,7 @@ void test_vlseg4e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -8601,7 +8601,7 @@ void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -8620,7 +8620,7 @@ void test_vlseg5e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -8643,7 +8643,7 @@ void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -8664,7 +8664,7 @@ void test_vlseg6e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -8689,7 +8689,7 @@ void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -8712,7 +8712,7 @@ void test_vlseg7e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -8739,7 +8739,7 @@ void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -8750,7 +8750,7 @@ void test_vlseg8e16ff_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -8765,7 +8765,7 @@ void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -8778,7 +8778,7 @@ void test_vlseg2e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t ma // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -8795,7 +8795,7 @@ void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -8810,7 +8810,7 @@ void test_vlseg3e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -8829,7 +8829,7 @@ void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -8846,7 +8846,7 @@ void test_vlseg4e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -8867,7 +8867,7 @@ void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -8886,7 +8886,7 @@ void test_vlseg5e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -8909,7 +8909,7 @@ void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -8930,7 +8930,7 @@ void test_vlseg6e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -8955,7 +8955,7 @@ void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_i16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -8978,7 +8978,7 @@ void test_vlseg7e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -9005,7 +9005,7 @@ void test_vlseg8e16ff_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -9016,7 +9016,7 @@ void test_vlseg8e16ff_v_i16mf2_m 
(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -9031,7 +9031,7 @@ void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -9044,7 +9044,7 @@ void test_vlseg2e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -9061,7 +9061,7 @@ void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_i16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -9076,7 +9076,7 @@ void test_vlseg3e16ff_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
[... the intervening hunks, from test_vlseg4e16ff_v_i16m1_m through test_vlseg4e8ff_v_u8mf8_m, repeat the identical one-line update on both the CHECK-RV32 and CHECK-RV64 sides: the trailing tail-policy operand (i32 1 on RV32, i64 1 on RV64, i.e. tail agnostic) is appended after the VL operand of every masked @llvm.riscv.vlseg{2,3,4,5,6,7,8}ff.mask.* call, covering the i16 (m1, m2, m4), i32 (mf2, m1, m2, m4), i64 (m1, m2, m4), and u8 (mf8) test variants ...]
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -10473,7 +10473,7 @@ void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -10488,7 +10488,7 @@ void test_vlseg3e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -10507,7 +10507,7 @@ void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -10524,7 +10524,7 @@ void test_vlseg4e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -10545,7 +10545,7 @@ void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -10564,7 +10564,7 @@ void test_vlseg5e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -10587,7 +10587,7 @@ void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -10608,7 +10608,7 @@ void test_vlseg6e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , 
, i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -10633,7 +10633,7 @@ void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -10656,7 +10656,7 @@ void test_vlseg7e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -10683,7 +10683,7 @@ void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , 
, i32 } [[TMP0]], 1 @@ -10694,7 +10694,7 @@ void test_vlseg8e8ff_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -10709,7 +10709,7 @@ void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -10722,7 +10722,7 @@ void test_vlseg2e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -10739,7 +10739,7 @@ void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -10754,7 +10754,7 @@ void test_vlseg3e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -10773,7 +10773,7 @@ void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -10790,7 +10790,7 @@ void test_vlseg4e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -10811,7 +10811,7 @@ void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -10830,7 +10830,7 @@ void test_vlseg5e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -10853,7 +10853,7 @@ void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -10874,7 +10874,7 @@ void test_vlseg6e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -10899,7 +10899,7 @@ void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -10922,7 +10922,7 @@ void test_vlseg7e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -10949,7 +10949,7 @@ void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -10960,7 +10960,7 @@ void test_vlseg8e8ff_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -10975,7 +10975,7 @@ void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -10988,7 +10988,7 @@ void test_vlseg2e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask // // 
CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -11005,7 +11005,7 @@ void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -11020,7 +11020,7 @@ void test_vlseg3e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -11039,7 +11039,7 @@ void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -11056,7 +11056,7 @@ void test_vlseg4e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -11077,7 +11077,7 @@ void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -11096,7 +11096,7 @@ void test_vlseg5e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg6e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -11119,7 +11119,7 @@ void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -11140,7 +11140,7 @@ void test_vlseg6e8ff_v_u8mf2_m (vuint8mf2_t 
*v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -11165,7 +11165,7 @@ void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -11188,7 +11188,7 @@ void test_vlseg7e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -11215,7 +11215,7 @@ void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // 
CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -11226,7 +11226,7 @@ void test_vlseg8e8ff_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -11241,7 +11241,7 @@ void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -11254,7 +11254,7 @@ void test_vlseg2e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vu // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -11271,7 +11271,7 @@ void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -11286,7 +11286,7 @@ void test_vlseg3e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m1_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -11305,7 +11305,7 @@ void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg5e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -11322,7 +11322,7 @@ void test_vlseg4e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg5e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -11343,7 +11343,7 @@ void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg6e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -11362,7 +11362,7 @@ void test_vlseg5e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: 
@test_vlseg6e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -11385,7 +11385,7 @@ void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -11406,7 +11406,7 @@ void test_vlseg6e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg7e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -11431,7 +11431,7 @@ void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -11454,7 +11454,7 @@ void test_vlseg7e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg8e8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -11481,7 +11481,7 @@ void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -11492,7 +11492,7 @@ void test_vlseg8e8ff_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -11507,7 +11507,7 @@ void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // CHECK-RV32-LABEL: @test_vlseg3e8ff_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // 
CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -11520,7 +11520,7 @@ void test_vlseg2e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vu // // CHECK-RV64-LABEL: @test_vlseg3e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -11537,7 +11537,7 @@ void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV32-LABEL: @test_vlseg4e8ff_v_u8m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -11552,7 +11552,7 @@ void test_vlseg3e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg4e8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -11571,7 +11571,7 @@ void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // CHECK-RV32-LABEL: @test_vlseg2e8ff_v_u8m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -11582,7 +11582,7 @@ void test_vlseg4e8ff_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, v // // CHECK-RV64-LABEL: @test_vlseg2e8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } 
@llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } @llvm.riscv.vlseg2ff.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8>, i64 } [[TMP0]], 1
@@ -11597,7 +11597,7 @@ void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu
// CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -11608,7 +11608,7 @@ void test_vlseg2e8ff_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vu
//
// CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
@@ -11623,7 +11623,7 @@ void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
// CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -11636,7 +11636,7 @@ void test_vlseg2e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t
//
// CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg3ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
@@ -11653,7 +11653,7 @@ void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -11668,7 +11668,7 @@ void test_vlseg3e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
//
// CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg4ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 1
@@ -11687,7 +11687,7 @@ void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
// CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
// CHECK-RV32-NEXT:  entry:
-// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i32(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 0
// CHECK-RV32-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i32 } [[TMP0]], 1
@@ -11704,7 +11704,7 @@ void test_vlseg4e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4
//
// CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } @llvm.riscv.vlseg5ff.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i64 } [[TMP0]], 0
// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue
{ , , , , , i64 } [[TMP0]], 1 @@ -11725,7 +11725,7 @@ void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -11744,7 +11744,7 @@ void test_vlseg5e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -11767,7 +11767,7 @@ void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -11788,7 +11788,7 @@ void test_vlseg6e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -11813,7 +11813,7 @@ void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -11836,7 +11836,7 @@ void test_vlseg7e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -11863,7 +11863,7 @@ void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -11874,7 +11874,7 @@ void test_vlseg8e16ff_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4 // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -11889,7 +11889,7 @@ void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -11902,7 +11902,7 @@ void test_vlseg2e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -11919,7 +11919,7 @@ void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -11934,7 +11934,7 @@ void test_vlseg3e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -11953,7 +11953,7 @@ void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -11970,7 +11970,7 @@ void test_vlseg4e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -11991,7 +11991,7 @@ void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -12010,7 +12010,7 @@ void test_vlseg5e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -12033,7 +12033,7 @@ void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -12054,7 +12054,7 @@ void test_vlseg6e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -12079,7 +12079,7 @@ void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -12102,7 +12102,7 @@ void test_vlseg7e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -12129,7 +12129,7 @@ void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -12140,7 +12140,7 @@ void test_vlseg8e16ff_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2 // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -12155,7 +12155,7 @@ void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -12168,7 +12168,7 @@ void test_vlseg2e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mas // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -12185,7 +12185,7 @@ void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -12200,7 +12200,7 @@ void test_vlseg3e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -12219,7 +12219,7 @@ void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg5e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -12236,7 +12236,7 @@ void test_vlseg4e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg5e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } 
@llvm.riscv.vlseg5ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -12257,7 +12257,7 @@ void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg6e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -12276,7 +12276,7 @@ void test_vlseg5e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg6e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -12299,7 +12299,7 @@ void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg7e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -12320,7 +12320,7 @@ void test_vlseg6e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg7e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } 
@llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -12345,7 +12345,7 @@ void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg8e16ff_v_u16m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -12368,7 +12368,7 @@ void test_vlseg7e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg8e16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -12395,7 +12395,7 @@ void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = 
extractvalue { , , i32 } [[TMP0]], 1 @@ -12406,7 +12406,7 @@ void test_vlseg8e16ff_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t * // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -12421,7 +12421,7 @@ void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // CHECK-RV32-LABEL: @test_vlseg3e16ff_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -12434,7 +12434,7 @@ void test_vlseg2e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask // // CHECK-RV64-LABEL: @test_vlseg3e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -12451,7 +12451,7 @@ void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV32-LABEL: @test_vlseg4e16ff_v_u16m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -12466,7 +12466,7 @@ void test_vlseg3e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // // CHECK-RV64-LABEL: @test_vlseg4e16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -12485,7 +12485,7 @@ void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // CHECK-RV32-LABEL: @test_vlseg2e16ff_v_u16m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -12496,7 +12496,7 @@ void test_vlseg4e16ff_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t * // // CHECK-RV64-LABEL: @test_vlseg2e16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -12511,7 +12511,7 @@ void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -12522,7 +12522,7 @@ void test_vlseg2e16ff_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , 
, i64 } [[TMP0]], 1 @@ -12537,7 +12537,7 @@ void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -12550,7 +12550,7 @@ void test_vlseg2e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -12567,7 +12567,7 @@ void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -12582,7 +12582,7 @@ void test_vlseg3e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -12601,7 +12601,7 @@ void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { 
, , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -12618,7 +12618,7 @@ void test_vlseg4e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -12639,7 +12639,7 @@ void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -12658,7 +12658,7 @@ void test_vlseg5e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -12681,7 +12681,7 @@ void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, 
vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -12702,7 +12702,7 @@ void test_vlseg6e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -12727,7 +12727,7 @@ void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -12750,7 +12750,7 @@ void test_vlseg7e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } 
@llvm.riscv.vlseg8ff.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -12777,7 +12777,7 @@ void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -12788,7 +12788,7 @@ void test_vlseg8e32ff_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2 // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -12803,7 +12803,7 @@ void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -12816,7 +12816,7 @@ void test_vlseg2e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mas // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , i64 } [[TMP0]], 1 @@ -12833,7 +12833,7 @@ void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -12848,7 +12848,7 @@ void test_vlseg3e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -12867,7 +12867,7 @@ void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -12884,7 +12884,7 @@ void test_vlseg4e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -12905,7 
+12905,7 @@ void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -12924,7 +12924,7 @@ void test_vlseg5e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -12947,7 +12947,7 @@ void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -12968,7 +12968,7 @@ void test_vlseg6e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -12993,7 +12993,7 @@ void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_u32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -13016,7 +13016,7 @@ void test_vlseg7e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -13043,7 +13043,7 @@ void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13054,7 +13054,7 @@ void test_vlseg8e32ff_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t * // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13069,7 +13069,7 @@ void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -13082,7 +13082,7 @@ void test_vlseg2e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mas // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -13099,7 +13099,7 @@ void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_u32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -13114,7 +13114,7 @@ void test_vlseg3e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -13133,7 +13133,7 @@ void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13144,7 +13144,7 @@ void test_vlseg4e32ff_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t * // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13159,7 +13159,7 @@ void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13170,7 +13170,7 @@ void test_vlseg2e32ff_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13185,7 +13185,7 @@ void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -13198,7 +13198,7 @@ void test_vlseg2e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mas // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -13215,7 +13215,7 @@ void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -13230,7 +13230,7 @@ void test_vlseg3e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -13249,7 +13249,7 @@ void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg5e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], 
[[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -13266,7 +13266,7 @@ void test_vlseg4e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg5e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -13287,7 +13287,7 @@ void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg6e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -13306,7 +13306,7 @@ void test_vlseg5e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg6e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -13329,7 +13329,7 @@ void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg7e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -13350,7 +13350,7 @@ void test_vlseg6e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg7e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -13375,7 +13375,7 @@ void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -13398,7 +13398,7 @@ void test_vlseg7e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg8e64ff_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } 
[[TMP0]], 1 @@ -13425,7 +13425,7 @@ void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13436,7 +13436,7 @@ void test_vlseg8e64ff_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t * // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13451,7 +13451,7 @@ void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // CHECK-RV32-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -13464,7 +13464,7 @@ void test_vlseg2e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mas // // CHECK-RV64-LABEL: @test_vlseg3e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -13481,7 +13481,7 @@ void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV32-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -13496,7 +13496,7 @@ void test_vlseg3e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // // CHECK-RV64-LABEL: @test_vlseg4e64ff_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -13515,7 +13515,7 @@ void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // CHECK-RV32-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13526,7 +13526,7 @@ void test_vlseg4e64ff_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t * // // CHECK-RV64-LABEL: @test_vlseg2e64ff_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13541,7 +13541,7 @@ void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13552,7 
+13552,7 @@ void test_vlseg2e64ff_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mas // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13567,7 +13567,7 @@ void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1 @@ -13580,7 +13580,7 @@ void test_vlseg2e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_ // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -13597,7 +13597,7 @@ void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -13612,7 +13612,7 @@ void test_vlseg3e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -13631,7 +13631,7 @@ void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -13648,7 +13648,7 @@ void test_vlseg4e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -13669,7 +13669,7 @@ void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -13688,7 +13688,7 @@ void test_vlseg5e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } 
@llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -13711,7 +13711,7 @@ void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -13732,7 +13732,7 @@ void test_vlseg6e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -13757,7 +13757,7 @@ void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -13780,7 +13780,7 @@ void test_vlseg7e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -13807,7 +13807,7 @@ void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1 @@ -13818,7 +13818,7 @@ void test_vlseg8e32ff_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32 // // CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1 @@ -13833,7 +13833,7 @@ void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , 
, , i32 } [[TMP0]], 1 @@ -13846,7 +13846,7 @@ void test_vlseg2e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t m // // CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1 @@ -13863,7 +13863,7 @@ void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1 @@ -13878,7 +13878,7 @@ void test_vlseg3e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // // CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1 @@ -13897,7 +13897,7 @@ void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1 @@ -13914,7 +13914,7 @@ void test_vlseg4e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // // 
CHECK-RV64-LABEL: @test_vlseg5e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1 @@ -13935,7 +13935,7 @@ void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1 @@ -13954,7 +13954,7 @@ void test_vlseg5e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // // CHECK-RV64-LABEL: @test_vlseg6e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1 @@ -13977,7 +13977,7 @@ void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0 // 
CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1 @@ -13998,7 +13998,7 @@ void test_vlseg6e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // // CHECK-RV64-LABEL: @test_vlseg7e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1 @@ -14023,7 +14023,7 @@ void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1) // CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0 // CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1 @@ -14046,7 +14046,7 @@ void test_vlseg7e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // // CHECK-RV64-LABEL: @test_vlseg8e32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1 @@ -14073,7 +14073,7 @@ void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_ // CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m2_m( // CHECK-RV32-NEXT: entry: -// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* 
[[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -14084,7 +14084,7 @@ void test_vlseg8e32ff_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_
//
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -14099,7 +14099,7 @@ void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
// CHECK-RV32-LABEL: @test_vlseg3e32ff_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -14112,7 +14112,7 @@ void test_vlseg2e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t m
//
// CHECK-RV64-LABEL: @test_vlseg3e32ff_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -14129,7 +14129,7 @@ void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV32-LABEL: @test_vlseg4e32ff_v_f32m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -14144,7 +14144,7 @@ void test_vlseg3e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
//
// CHECK-RV64-LABEL: @test_vlseg4e32ff_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -14163,7 +14163,7 @@ void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_f32m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -14174,7 +14174,7 @@ void test_vlseg4e32ff_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_
//
// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -14189,7 +14189,7 @@ void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -14200,7 +14200,7 @@ void test_vlseg2e32ff_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t ma
//
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -14215,7 +14215,7 @@ void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -14228,7 +14228,7 @@ void test_vlseg2e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t m
//
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -14245,7 +14245,7 @@ void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -14260,7 +14260,7 @@ void test_vlseg3e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -14279,7 +14279,7 @@ void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg5e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , i32 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i32 } [[TMP0]], 1
@@ -14296,7 +14296,7 @@ void test_vlseg4e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg5e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , i64 } @llvm.riscv.vlseg5ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , i64 } [[TMP0]], 1
@@ -14317,7 +14317,7 @@ void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg6e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , i32 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i32 } [[TMP0]], 1
@@ -14336,7 +14336,7 @@ void test_vlseg5e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg6e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , i64 } @llvm.riscv.vlseg6ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , i64 } [[TMP0]], 1
@@ -14359,7 +14359,7 @@ void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg7e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , i32 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i32 } [[TMP0]], 1
@@ -14380,7 +14380,7 @@ void test_vlseg6e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg7e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , i64 } @llvm.riscv.vlseg7ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , i64 } [[TMP0]], 1
@@ -14405,7 +14405,7 @@ void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg8e64ff_v_f64m1_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i32 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i32 } [[TMP0]], 1
@@ -14428,7 +14428,7 @@ void test_vlseg7e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg8e64ff_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , , i64 } @llvm.riscv.vlseg8ff.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , , i64 } [[TMP0]], 1
@@ -14455,7 +14455,7 @@ void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -14466,7 +14466,7 @@ void test_vlseg8e64ff_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_
//
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
@@ -14481,7 +14481,7 @@ void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
// CHECK-RV32-LABEL: @test_vlseg3e64ff_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , i32 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , i32 } [[TMP0]], 1
@@ -14494,7 +14494,7 @@ void test_vlseg2e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t m
//
// CHECK-RV64-LABEL: @test_vlseg3e64ff_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , i64 } @llvm.riscv.vlseg3ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , i64 } [[TMP0]], 1
@@ -14511,7 +14511,7 @@ void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV32-LABEL: @test_vlseg4e64ff_v_f64m2_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , , , i32 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i32 } [[TMP0]], 1
@@ -14526,7 +14526,7 @@ void test_vlseg3e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
//
// CHECK-RV64-LABEL: @test_vlseg4e64ff_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , i64 } @llvm.riscv.vlseg4ff.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , i64 } [[TMP0]], 1
@@ -14545,7 +14545,7 @@ void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
// CHECK-RV32-LABEL: @test_vlseg2e64ff_v_f64m4_m(
// CHECK-RV32-NEXT: entry:
-// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call { , , i32 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i32( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
// CHECK-RV32-NEXT: [[TMP1:%.*]] = extractvalue { , , i32 } [[TMP0]], 0
// CHECK-RV32-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV32-NEXT: [[TMP2:%.*]] = extractvalue { , , i32 } [[TMP0]], 1
@@ -14556,7 +14556,7 @@ void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
//
// CHECK-RV64-LABEL: @test_vlseg2e64ff_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , i64 } @llvm.riscv.vlseg2ff.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , i64 } [[TMP0]], 0
// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , i64 } [[TMP0]], 1
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
index dd2bdb9..8115a53 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
@@ -3131,4176 +3131,8214 @@ void test_vlsseg2e64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t *
return vlsseg2e64_v_u64m4(v0, v1, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32mf2(v0, v1, base, bstride, vl);
+void test_vlsseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl);
+void test_vlsseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl);
+void test_vlsseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl);
+void test_vlsseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+void test_vlsseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+void test_vlsseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+void test_vlsseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32m1(v0, v1, base, bstride, vl);
+void test_vlsseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl);
+void test_vlsseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl);
+void test_vlsseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl);
+void test_vlsseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl);
+void test_vlsseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
+void test_vlsseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
+void test_vlsseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl);
+void test_vlsseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl);
+void test_vlsseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl);
+void test_vlsseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl);
+void test_vlsseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl);
+void test_vlsseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: ret void
-//
-void test_vlsseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl);
+void test_vlsseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+ return vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl);
}
-// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+//
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); +void test_vlsseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +void test_vlsseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +void test_vlsseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +void test_vlsseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); +void test_vlsseg2e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32mf2(v0, v1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32mf2(v0, v1, v2, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); +void test_vlsseg4e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32mf2(v0, v1, v2, v3, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg5e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_f32mf2(v0, v1, v2, v3, v4, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: ret void -// +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m1(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32m1(v0, v1, v2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32m1(v0, v1, v2, v3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_f32m1(v0, v1, v2, v3, v4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv2f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], 
* [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m2(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32m2(v0, v1, v2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32m2(v0, v1, v2, v3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f32.i64(float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32m4(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_f64m1(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_f64m1(v0, v1, v2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_f64m1(v0, v1, v2, v3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64_v_f64m1(v0, v1, v2, v3, v4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_f64m2(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_f64m2(v0, v1, v2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_f64m2(v0, v1, v2, v3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f64.i64(double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_f64m4(v0, v1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// void test_vlsseg3e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { return vlsseg3e8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, 
[Analogous masked-intrinsic test updates follow for test_vlsseg{7,8}e8_v_i8mf8_m, test_vlsseg{2..8}e8_v_i8mf4_m (nxv2i8), test_vlsseg{2..8}e8_v_i8mf2_m (nxv4i8), test_vlsseg{2..8}e8_v_i8m1_m (nxv8i8), test_vlsseg{2..4}e8_v_i8m2_m (nxv16i8), test_vlsseg2e8_v_i8m4_m (nxv32i8), test_vlsseg{2..8}e16_v_i16mf4_m (nxv1i16), test_vlsseg{2..8}e16_v_i16mf2_m (nxv2i16), and test_vlsseg{2..7}e16_v_i16m1_m (nxv4i16). Each checks that the maskedoff operands, base, bstride, mask, and vl are passed to the corresponding @llvm.riscv.vlssegN.mask.nxvKiW.i64 intrinsic together with the new trailing `i64 1` tail-policy operand, and that every result field is extracted and stored through v0..v(N-1) with the element alignment (1 for e8, 2 for e16).]
+
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_i16m4_m(v0, v1, 
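Throughout these tests, the trailing "i64 1" operand on each @llvm.riscv.vlsseg*.mask.* call is the new tail policy argument: the plain _m builtins default to tail agnostic. A minimal sketch of the two encodings, assuming illustrative enumerator names (the intrinsics take a bare i64 constant, not a named enum):

/* Illustrative only, not part of the patch: values of the policy operand. */
enum RVVTailPolicy {
  TAIL_UNDISTURBED = 0, /* keep the previous values in tail elements */
  TAIL_AGNOSTIC = 1     /* tail elements may hold any value */
};
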
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
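For context, a sketch of how one of the masked builtins checked above is called from user code. The _m builtin itself takes no policy parameter; Clang appends the tail-agnostic constant when lowering to the masked intrinsic. Types follow the i32m1 tests; the wrapper name is hypothetical:

#include <riscv_vector.h>

/* Hypothetical wrapper around the builtin exercised above. */
void load_pairs_i32m1(vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask,
                      vint32m1_t off0, vint32m1_t off1,
                      const int32_t *base, ptrdiff_t bstride, size_t vl) {
  /* Lowers to @llvm.riscv.vlsseg2.mask.nxv2i32.i64(..., i64 1). */
  vlsseg2e32_v_i32m1_m(v0, v1, mask, off0, off1, base, bstride, vl);
}
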
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
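The mask types in these signatures follow the usual vboolN_t convention with N = SEW/LMUL, which is why the i64m1 tests above and the u8mf8 tests below both take a vbool64_t mask. A small sketch (hypothetical helper name) showing two of the tested builtins sharing one mask:

#include <riscv_vector.h>

/* Hypothetical example: i64m1 and u8mf8 share vbool64_t (SEW/LMUL = 64). */
void same_mask_type(vint64m1_t *a0, vint64m1_t *a1, vbool64_t mask,
                    vint64m1_t aoff0, vint64m1_t aoff1, const int64_t *abase,
                    vuint8mf8_t *b0, vuint8mf8_t *b1,
                    vuint8mf8_t boff0, vuint8mf8_t boff1, const uint8_t *bbase,
                    ptrdiff_t bstride, size_t vl) {
  vlsseg2e64_v_i64m1_m(a0, a1, mask, aoff0, aoff1, abase, bstride, vl);
  vlsseg2e8_v_u8mf8_m(b0, b1, mask, boff0, boff1, bbase, bstride, vl);
}
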
+void test_vlsseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, 
maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg5.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
} [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
[[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t 
maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg4.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg5.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> }
@llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], 
* [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { + return 
vlsseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg5.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg6.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg7.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vlsseg8.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg2.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg3.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vlsseg4.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask,
maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t 
maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const 
_Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void 
test_vlsseg4e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) { + return 
vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) { + return vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// 
CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
// CHECK-RV64-NEXT:    ret void
//
-void test_vlsseg8e8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg8e8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
}

[vlseg test churn, continuing in the same pattern: deletions of the old-form masked checks for the vlsseg2-vlsseg8 tests of the i8mf2, i8m1, i8m2, i8m4 and i16mf4 cases (whose @llvm.riscv.vlssegN.mask.* calls carry no policy operand) are paired with insertions of the regenerated checks for the f32m1, f32m2, f32m4, f64m1, f64m2 and f64m4 `_m` tests, each @llvm.riscv.vlssegN.mask.* call now carrying the trailing tail-policy operand `i64 1`, with the matching float/double element types, masks and alignments (align 4 / align 8).]
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { + return vlsseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, 
vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg3e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg4e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg5e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg6e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg2e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg7e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void
test_vlsseg3e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg8e8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg3e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg4e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg5e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg6e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg7e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg8e8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_i8mf4_mt(v0, v1, v2, 
v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg5e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg3e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e8_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg6.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vlsseg4.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg6e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0,
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg4e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg5e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, 
vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, 
bstride, vl); +void test_vlsseg6e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg8e8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, 
ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg4e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg5e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg6.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg3e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg6e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e8_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg7.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vlsseg4e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg7e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e8_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vlsseg8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue {
, , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg8e8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret 
void // -void test_vlsseg6e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg2e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg3e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t 
*base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg4e8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_i8m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg3e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e16_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg4e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg4e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e16_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg5e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg6e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg7e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg8e16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg3e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2,
vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg4e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg5e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg6e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, 
vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg7e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg8e16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* 
[[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg2.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg5e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg5e64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg2e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e16_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vlsseg6.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg3.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] =
extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg3e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t 
maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg4e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, 
vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg5e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg6e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg7e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg8e16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, 
vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg2e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg3e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t 
bstride, size_t vl) { - return vlsseg7e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg4e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) 
{ - return vlsseg8e8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg5e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg6e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg7e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg8e32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg2e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg3e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t 
*v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg4e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_i32m1_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg5e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_m( +// 
CHECK-RV64-LABEL: @test_vlsseg6e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg6e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg7e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg8e32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, 
vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 
1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg3e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, 
vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg4e32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg2e32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_m( +// 
CHECK-RV64-LABEL: @test_vlsseg5e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg5e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e64_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg6e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg7e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg8e64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); 
+void test_vlsseg3e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e64_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e64_v_i64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg3.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg4e64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e64_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_i64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vlsseg4.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg4e8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg2e64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e64_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vlsseg2.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg2.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg2.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg3.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg3e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e8_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg3.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg4.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg4e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e8_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg4.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg5.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg4e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg4e16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg5e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg5e8_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg5.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg6.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg5e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg5e16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg6e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg6e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg6.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg7.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg6e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg6e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg7e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg7e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg7.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vlsseg8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg7e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg7e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg8e8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg8e8_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vlsseg8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg2.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg8e16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg8e16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg2.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+void test_vlsseg2e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e8_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg3.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg3e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg3e8_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg3.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vlsseg4.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg4e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg4e8_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vlsseg4.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]],
3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg5e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg6e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg7e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg8e8_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { 
, , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg5e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t 
mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg6e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, 
vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg7e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg8e8_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, 
vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl); +void test_vlsseg5e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e8_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg6e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg7e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 
2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg8e8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg2e8_v_u8m2_mt 
(vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg3e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e8_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg4e8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e8_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg2e8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e8_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t 
*v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], 
i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg5e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg6e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return 
vlsseg6e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg7e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, 
ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg8e16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg2e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, 
vuint16mf2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg3e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], 
align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg4e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg5e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vlsseg2e32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg7e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg8e16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t 
maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg4e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return 
vlsseg4e16_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg5e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg6e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vlsseg8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg7.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg7e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3,
vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e16_v_u16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg2.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vlsseg8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg8e16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg3.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]],
<vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg2.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e16_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vlsseg4.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg3.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg3e16_v_u16m2_mt
(vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e16_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e16_v_u16m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vlsseg2.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vlsseg4.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg4e16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e16_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e16_v_u16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg2.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vlsseg2.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask,
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e16_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg3.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg4.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg3.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg3e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e32_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg5.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg4.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg4e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t
mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e32_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg6.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg5.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg5e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e32_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL:
@test_vlsseg7e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg7.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg6.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg6e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2,
maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vlsseg8.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg7.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6,
maskedoff7, base, bstride, vl);
+void test_vlsseg7e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg2.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg8e32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4,
maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg3.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg2.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg4.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg3.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2,
vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg3e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg3e32_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg5.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg4.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg5e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg5e32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl);
+void test_vlsseg4e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg4e32_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg5e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg6.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg5.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg6e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg6e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl);
+void test_vlsseg5e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg5e32_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg7.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:
[[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-LABEL: @test_vlsseg6e32_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg6.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg7e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg7e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl);
+void test_vlsseg6e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg6e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg7e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vlsseg8.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64
[[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg7.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg8e32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg8e32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl);
+void test_vlsseg7e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg7e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2,
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg8e32_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg2.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlsseg8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg2e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg2e32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg8e32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg8e32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg3.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:
[[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg2.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg3e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg3e32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return vlsseg2e32_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vlsseg3e32_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vlsseg4.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vlsseg3.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vlsseg4e32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl) {
- return vlsseg4e32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl);
+void test_vlsseg3e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+ return
vlsseg3e32_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg4e32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void 
test_vlsseg2e32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg4e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg5e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e64_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg6e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg7e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, 
vuint64m1_t maskedoff6, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg8e64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, 
vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg4e64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7309,13 +11347,13 @@ void test_vlsseg2e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Floa // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4(v0, v1, v2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7326,13 +11364,13 @@ void test_vlsseg3e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4(v0, v1, v2, v3, base, bstride, vl); +void 
test_vlsseg4e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7345,13 +11383,13 @@ void test_vlsseg4e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4(v0, v1, v2, v3, v4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7366,13 +11404,13 @@ void test_vlsseg5e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bstride, vl); +void test_vlsseg6e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, 
vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7389,13 +11427,13 @@ void test_vlsseg6e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv1f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7414,26 +11452,26 @@ void test_vlsseg7e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return 
vlsseg8e16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7442,13 +11480,13 @@ void test_vlsseg2e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Floa // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2(v0, v1, v2, base, bstride, vl); +void test_vlsseg3e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2( +// CHECK-RV64-LABEL: 
@test_vlsseg4e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7459,13 +11497,13 @@ void test_vlsseg3e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2(v0, v1, v2, v3, base, bstride, vl); +void test_vlsseg4e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7478,13 +11516,13 @@ void test_vlsseg4e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2(v0, v1, v2, v3, v4, base, bstride, vl); +void test_vlsseg5e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7499,13 +11537,13 @@ void test_vlsseg5e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bstride, vl); +void test_vlsseg6e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7522,13 +11560,13 @@ void test_vlsseg6e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +void test_vlsseg7e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { 
, , , , , , , } @llvm.riscv.vlsseg8.nxv2f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7547,26 +11585,26 @@ void test_vlsseg7e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2 // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +void test_vlsseg8e16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m1(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7575,13 +11613,13 @@ void test_vlsseg2e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m1(v0, v1, v2, base, bstride, vl); +void test_vlsseg3e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7592,13 +11630,13 @@ void test_vlsseg3e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1(v0, v1, v2, v3, base, bstride, vl); +void test_vlsseg4e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -7611,13 +11649,13 @@ void test_vlsseg4e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, 
vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1(v0, v1, v2, v3, v4, base, bstride, vl); +void test_vlsseg5e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e16_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -7632,13 +11670,13 @@ void test_vlsseg5e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bstride, vl); +void test_vlsseg6e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -7655,13 +11693,13 @@ void test_vlsseg6e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, 
vfloat16m1_t *v6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bstride, vl); +void test_vlsseg7e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1( +// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.nxv4f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -7680,26 +11718,26 @@ void test_vlsseg7e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t * // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bstride, vl); +void test_vlsseg8e16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t 
*v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -7708,13 +11746,13 @@ void test_vlsseg2e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2(v0, v1, v2, base, bstride, vl); +void test_vlsseg3e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e16_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2( +// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.nxv8f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -7725,476 +11763,535 @@ void test_vlsseg3e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t * // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2(v0, v1, v2, v3, base, bstride, vl); +void test_vlsseg4e16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e16_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4( +// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.nxv16f16.i64(half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4(v0, v1, base, bstride, vl); +void test_vlsseg2e16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e16_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg2e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, } @llvm.riscv.vlsseg3.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg3e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg4e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg5e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg6e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg7e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg8e32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg2e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t 
maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg3e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg4e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg5e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg5e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e32_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg6e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg6e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg7e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg7e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg8e32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t 
*v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg2e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e32_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg3e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, ptrdiff_t 
bstride, size_t vl, uint8_t ta) { + return vlsseg3e32_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vlsseg4e32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv2f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg4e32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e32_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_f32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg2.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vlsseg2.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg2e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg2e16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
+void test_vlsseg2e32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e32_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vlsseg3.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vlsseg2.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vlsseg3e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
-  return vlsseg3e16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl);
+void test_vlsseg2e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) {
+  return vlsseg2e64_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC);
 }
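
A minimal usage sketch of the new masked "_mt" convention, for readers of this patch; it is illustrative only, and it assumes a VE_TAIL_UNDISTURBED constant is provided alongside the VE_TAIL_AGNOSTIC constant these tests pass (the trailing "i64 1" operand in the CHECK lines above is the encoded tail-agnostic policy).

#include <riscv_vector.h>
#include <stddef.h>

/* Strided two-field segment load under a mask. Inactive elements come from
   maskedoff0/maskedoff1; passing VE_TAIL_UNDISTURBED (assumed to exist)
   rather than VE_TAIL_AGNOSTIC asks the intrinsic to keep the tail elements
   instead of leaving them unspecified. Mirrors test_vlsseg2e64_v_f64m1_mt
   directly above. */
static void load_pairs_keep_tail(vfloat64m1_t *v0, vfloat64m1_t *v1,
                                 vbool64_t mask, vfloat64m1_t maskedoff0,
                                 vfloat64m1_t maskedoff1, const double *base,
                                 ptrdiff_t bstride, size_t vl) {
  vlsseg2e64_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride,
                        vl, VE_TAIL_UNDISTURBED);
}
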
-// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg5e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 
[[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg5e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg5e16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl); +void test_vlsseg4e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg6e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg5e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vlsseg5.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg6e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg6e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl); +void test_vlsseg5e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg5e64_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg7e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg6e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vlsseg6.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: 
store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg7e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg7e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl); +void test_vlsseg6e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg6e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg8e16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vlsseg7e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv4f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vlsseg7.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg8e16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg8e16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl); +void test_vlsseg7e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg7e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg8e64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vlsseg8.mask.nxv1f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg8e64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg8e64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg3e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg3e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg3e16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl); +void test_vlsseg2e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg4e16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vlsseg3e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv8f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vlsseg3.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg4e16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl); +void test_vlsseg3e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg3e64_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bstride, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vlsseg2e16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vlsseg4e64_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv16f16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vlsseg4.mask.nxv2f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vlsseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) { - return vlsseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl); +void test_vlsseg4e64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, 
vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg4e64_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bstride, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vlsseg2e64_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vlsseg2.mask.nxv4f64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vlsseg2e64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, ptrdiff_t bstride, size_t vl, uint8_t ta) { + return vlsseg2e64_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl, VE_TAIL_AGNOSTIC); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c index e59c336..0cfcba1 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c @@ -1566,6 +1566,216 @@ vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size return vluxei64_v_u64m8(base, bindex, vl); } +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4(const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxei8_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2(const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxei8_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1(const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxei8_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei8_v_f16m2(const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxei8_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei8_v_f16m4(const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxei8_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8(const _Float16 *base, vuint8m4_t bindex, size_t vl) { + return vluxei8_v_f16m8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei16_v_f16mf4(const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxei16_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2(const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxei16_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei16_v_f16m1(const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxei16_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei16_v_f16m2(const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxei16_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei16_v_f16m4(const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxei16_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei16_v_f16m8(const _Float16 *base, vuint16m8_t bindex, size_t vl) { + return vluxei16_v_f16m8(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei32_v_f16mf4(const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxei32_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei32_v_f16mf2(const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxei32_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei32_v_f16m1(const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxei32_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei32_v_f16m2(const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxei32_v_f16m2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4(const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxei32_v_f16m4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei64_v_f16mf4(const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxei64_v_f16mf4(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei64_v_f16mf2(const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxei64_v_f16mf2(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei64_v_f16m1(const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxei64_v_f16m1(base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to 
* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei64_v_f16m2(const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxei64_v_f16m2(base, bindex, vl); +} + // CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1912,2337 +2122,4247 @@ vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_ // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { +vfloat64m8_t test_vluxei64_v_f64m8 (const double *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_f64m8(base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +vint8mf8_t test_vluxei8_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +vint8mf4_t test_vluxei8_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +vint8mf2_t test_vluxei8_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { +vint8m1_t test_vluxei8_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { +vint8m2_t test_vluxei8_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { +vint8m4_t test_vluxei8_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { +vint8m8_t test_vluxei8_v_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, 
vuint16m4_t bindex, size_t vl) { return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } // CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vluxei64_v_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vluxei64_v_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vluxei8_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vluxei8_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vluxei8_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vluxei8_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vluxei8_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vluxei8_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vluxei16_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei16_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vluxei16_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vluxei16_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vluxei16_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vluxei16_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vluxei32_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vluxei32_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vluxei32_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vluxei32_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vluxei32_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vluxei64_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei64_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vluxei64_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vluxei64_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vluxei8_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vluxei8_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vluxei8_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vluxei8_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vluxei8_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vluxei16_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vluxei16_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vluxei16_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vluxei16_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vluxei16_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vluxei32_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vluxei32_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vluxei32_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vluxei32_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vluxei32_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vluxei64_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vluxei64_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vluxei64_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vluxei64_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vluxei8_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vluxei8_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vluxei8_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vluxei8_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vluxei16_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vluxei16_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vluxei16_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vluxei16_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vluxei32_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vluxei32_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vluxei32_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vluxei32_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vluxei64_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vluxei64_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vluxei64_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vluxei64_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei8_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei8_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei8_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+vuint8m1_t test_vluxei8_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
+vuint8m2_t test_vluxei8_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
+vuint8m4_t test_vluxei8_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
+vuint8m8_t test_vluxei8_v_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei16_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei16_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei16_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
+vuint8m1_t test_vluxei16_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
+vuint8m2_t test_vluxei16_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
+vuint8m4_t test_vluxei16_v_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei32_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei32_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei32_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m1_t test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
+vuint8m1_t test_vluxei32_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+vuint8m2_t test_vluxei32_v_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei64_v_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+vuint8mf4_t test_vluxei64_v_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+vuint8mf2_t test_vluxei64_v_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+vuint8m1_t test_vluxei64_v_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei8_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei8_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei8_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+vuint16m2_t test_vluxei8_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+vuint16m4_t test_vluxei8_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
+vuint16m8_t test_vluxei8_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei16_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei16_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
+vuint16m1_t test_vluxei16_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
+vuint16m2_t test_vluxei16_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
+vuint16m4_t test_vluxei16_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
+vuint16m8_t test_vluxei16_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei32_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei32_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m1_t test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
+vuint16m1_t test_vluxei32_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
+vuint16m2_t test_vluxei32_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
+vuint16m4_t test_vluxei32_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
+vuint16mf4_t test_vluxei64_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+vuint16mf2_t test_vluxei64_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+vuint16m1_t test_vluxei64_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+vuint16m2_t test_vluxei64_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei8_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint32m1_t test_vluxei8_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei8_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+vuint32m4_t test_vluxei8_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
+vuint32m8_t test_vluxei8_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei16_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei16_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
+vuint32m2_t test_vluxei16_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
+vuint32m4_t test_vluxei16_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
+vuint32m8_t test_vluxei16_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei32_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
+vuint32m1_t test_vluxei32_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
+vuint32m2_t test_vluxei32_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
+vuint32m4_t test_vluxei32_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
+vuint32m8_t test_vluxei32_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
+vuint32mf2_t test_vluxei64_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
+vuint32m1_t test_vluxei64_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
+vuint32m2_t test_vluxei64_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
+vuint32m4_t test_vluxei64_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint64m1_t test_vluxei8_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
+vuint64m2_t test_vluxei8_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei8_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
+vuint64m8_t test_vluxei8_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+vuint64m1_t test_vluxei16_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei16_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
+vuint64m4_t test_vluxei16_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
+vuint64m8_t test_vluxei16_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+vuint64m1_t test_vluxei32_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+vuint64m2_t test_vluxei32_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+vuint64m4_t test_vluxei32_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
+vuint64m8_t test_vluxei32_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+vuint64m1_t test_vluxei64_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
+vuint64m2_t test_vluxei64_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
+vuint64m4_t test_vluxei64_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }

 // CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vluxei64_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8_v_f16mf2_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8_v_f16m1_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
-  return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8_v_f16m2_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
-  return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8_v_f16m4_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+vfloat16m8_t test_vluxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8_v_f16m8_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf4_t test_vluxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+vfloat16mf2_t test_vluxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16_v_f16mf2_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
-  return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+vfloat16m1_t test_vluxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16_v_f16m1_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
-  return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+vfloat16m2_t test_vluxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16_v_f16m2_m(mask, maskedoff, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+vfloat16m4_t test_vluxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_f16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+ return vluxei16_v_f16m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+ return vluxei32_v_f16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+ return vluxei32_v_f16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+ return vluxei32_v_f16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+ return vluxei32_v_f16m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+ return vluxei64_v_f16mf4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+ return vluxei64_v_f16mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+ return vluxei64_v_f16m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+ return vluxei64_v_f16m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32mf2_t test_vluxei8_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m1_t test_vluxei8_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m2_t test_vluxei8_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m4_t test_vluxei8_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+ return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m8_t test_vluxei8_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+ return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32mf2_t test_vluxei16_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m1_t test_vluxei16_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m2_t test_vluxei16_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+ return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m4_t test_vluxei16_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+ return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32m8_t test_vluxei16_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+ return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat32mf2_t test_vluxei32_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+ return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei32_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
 return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei32_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
 return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei32_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
 return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei32_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
 return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei64_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
 return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei64_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
 return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei64_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
 return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei64_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
 return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei8_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
 return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei8_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
 return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei8_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
 return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei8_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
 return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei16_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
 return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei16_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
 return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei16_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
 return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei16_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
 return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei32_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
 return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei32_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
 return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei32_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
 return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei32_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
 return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei64_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
 return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei64_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
 return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei64_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
 return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
}

// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei64_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
 return vluxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16mf4_t test_vluxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxei8_v_f16mf4 (base, bindex, vl);
+vint8mf8_t test_vluxei8_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16mf2_t test_vluxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxei8_v_f16mf2 (base, bindex, vl);
+vint8mf4_t test_vluxei8_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m1_t test_vluxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
- return vluxei8_v_f16m1 (base, bindex, vl);
+vint8mf2_t test_vluxei8_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m2_t test_vluxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
- return vluxei8_v_f16m2 (base, bindex, vl);
+vint8m1_t test_vluxei8_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m4_t test_vluxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) {
- return vluxei8_v_f16m4 (base, bindex, vl);
+vint8m2_t test_vluxei8_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m8_t test_vluxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) {
- return vluxei8_v_f16m8 (base, bindex, vl);
+vint8m4_t test_vluxei8_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8m8_t test_vluxei8_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i8m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
-// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16mf2_t test_vluxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
- return vluxei16_v_f16mf2 (base, bindex, vl);
+vint8mf8_t test_vluxei16_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m1_t test_vluxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxei16_v_f16m1 (base, bindex, vl);
+vint8mf4_t test_vluxei16_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m2_t test_vluxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) {
- return vluxei16_v_f16m2 (base, bindex, vl);
+vint8mf2_t test_vluxei16_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m4_t test_vluxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) {
- return vluxei16_v_f16m4 (base, bindex, vl);
+vint8m1_t test_vluxei16_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16m8_t test_vluxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) {
- return vluxei16_v_f16m8 (base, bindex, vl);
+vint8m2_t test_vluxei16_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16mf4_t test_vluxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxei32_v_f16mf4 (base, bindex, vl);
+vint8m4_t test_vluxei16_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
//
-vfloat16mf2_t test_vluxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxei32_v_f16mf2 (base, bindex, vl);
+vint8mf8_t test_vluxei32_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP1]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8mf4_t test_vluxei32_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8mf2_t test_vluxei32_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8m1_t test_vluxei32_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8m2_t test_vluxei32_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8mf8_t test_vluxei64_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8mf4_t test_vluxei64_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8mf2_t test_vluxei64_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint8m1_t test_vluxei64_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vluxei8_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vluxei8_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vluxei8_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vluxei8_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vluxei8_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m8_t test_vluxei8_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vluxei16_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vluxei16_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vluxei16_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vluxei16_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vluxei16_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m8_t test_vluxei16_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vluxei32_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vluxei32_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vluxei32_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vluxei32_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m4_t test_vluxei32_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf4_t test_vluxei64_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16mf2_t test_vluxei64_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m1_t test_vluxei64_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint16m2_t test_vluxei64_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei64_v_i16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vluxei8_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vluxei8_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vluxei8_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vluxei8_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m8_t test_vluxei8_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei8_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vluxei16_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vluxei16_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vluxei16_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vluxei16_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m8_t test_vluxei16_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxei16_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32mf2_t test_vluxei32_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m1_t test_vluxei32_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m2_t test_vluxei32_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxei32_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vint32m4_t test_vluxei32_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta)
+  return vluxei32_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vint32m8_t test_vluxei32_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_i32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vint32mf2_t test_vluxei64_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vint32m1_t test_vluxei64_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vint32m2_t test_vluxei64_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vint32m4_t test_vluxei64_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei8_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei8_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei8_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei8_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei16_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei16_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei16_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei16_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei32_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei32_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei32_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei32_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vint64m1_t test_vluxei64_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vint64m2_t test_vluxei64_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vint64m4_t test_vluxei64_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vint64m8_t test_vluxei64_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_i64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei8_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei8_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei8_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei8_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei8_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei8_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP1]]
+//
+vuint8m8_t test_vluxei8_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u8m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei16_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei16_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei16_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei16_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei16_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP1]]
+//
+vuint8m4_t test_vluxei16_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u8m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei32_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei32_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei32_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei32_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP1]]
+//
+vuint8m2_t test_vluxei32_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u8m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP1]]
+//
+vuint8mf8_t test_vluxei64_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u8mf8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP1]]
+//
+vuint8mf4_t test_vluxei64_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u8mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP1]]
+//
+vuint8mf2_t test_vluxei64_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u8mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP1]]
+//
+vuint8m1_t test_vluxei64_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u8m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei8_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei8_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei8_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei8_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei8_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei8_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei16_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei16_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei16_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei16_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei16_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP1]]
+//
+vuint16m8_t test_vluxei16_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei32_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei32_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei32_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei32_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP1]]
+//
+vuint16m4_t test_vluxei32_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP1]]
+//
+vuint16mf4_t test_vluxei64_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP1]]
+//
+vuint16mf2_t test_vluxei64_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP1]]
+//
+vuint16m1_t test_vluxei64_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP1]]
+//
+vuint16m2_t test_vluxei64_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei8_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei8_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei8_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei8_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei8_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei16_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei16_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei16_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei16_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei16_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei32_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei32_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei32_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei32_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+//
+vuint32m8_t test_vluxei32_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP1]]
+//
+vuint32mf2_t test_vluxei64_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP1]]
+//
+vuint32m1_t test_vluxei64_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+//
+vuint32m2_t test_vluxei64_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+//
+vuint32m4_t test_vluxei64_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei8_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei8_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei8_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei8_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei8_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei16_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei16_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei16_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei16_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei32_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei32_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP1]]
+//
+vuint64m4_t test_vluxei32_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP1]]
+//
+vuint64m8_t test_vluxei32_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP1]]
+//
+vuint64m1_t test_vluxei64_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP1]]
+//
+vuint64m2_t test_vluxei64_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_u64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to
* +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vluxei64_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_u64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vluxei64_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_u64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf4_t test_vluxei8_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei8_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei8_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m2_t test_vluxei8_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei8_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m8_t test_vluxei8_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1 (base, bindex, vl); +vfloat16mf4_t test_vluxei16_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16mf2_t test_vluxei16_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m1_t test_vluxei16_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2 (base, bindex, vl); 
+vfloat16m2_t test_vluxei16_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP1]]
 //
-vfloat16m4_t test_vluxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) {
-  return vluxei32_v_f16m4 (base, bindex, vl);
+vfloat16m4_t test_vluxei16_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei16_v_f16m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP1]]
 //
-vfloat16mf4_t test_vluxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) {
-  return vluxei64_v_f16mf4 (base, bindex, vl);
+vfloat16mf4_t test_vluxei32_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP1]]
 //
-vfloat16mf2_t test_vluxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) {
-  return vluxei64_v_f16mf2 (base, bindex, vl);
+vfloat16mf2_t test_vluxei32_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1 (base, bindex, vl); +vfloat16m1_t test_vluxei32_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2 (base, bindex, vl); +vfloat16m2_t test_vluxei32_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat16m4_t test_vluxei32_v_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f16m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vluxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat16mf4_t test_vluxei64_v_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f16mf4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vluxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat16mf2_t test_vluxei64_v_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f16mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat16m1_t test_vluxei64_v_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f16m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_mt( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat16m2_t test_vluxei64_v_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f16m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vluxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return 
vluxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei8_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vluxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) { - return vluxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei8_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vluxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei8_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vluxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei8_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f32m4_mt(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei8_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei16_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vluxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei16_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m8_t test_vluxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) { - return vluxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei16_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vluxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei16_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vluxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei16_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei32_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei32_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m4_t test_vluxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl); +vfloat32m2_t test_vluxei32_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf4_t test_vluxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 
*base, vuint64m1_t bindex, size_t vl) { - return vluxei64_v_f16mf4_m (mask, maskedoff, base, bindex, vl); +vfloat32m4_t test_vluxei32_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16mf2_t test_vluxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl); +vfloat32m8_t test_vluxei32_v_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f32m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m1_t test_vluxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl); +vfloat32mf2_t test_vluxei64_v_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f32mf2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat16m2_t test_vluxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) { - return vluxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl); +vfloat32m1_t test_vluxei64_v_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return 
vluxei64_v_f32m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vluxei64_v_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f32m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vluxei64_v_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxei64_v_f32m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei8_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei8_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei8_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei8_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxei8_v_f64m8_mt(mask, maskedoff, base, 
bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei16_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei16_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vluxei16_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vluxei16_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxei16_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vluxei32_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vluxei32_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxei32_v_f64m2_mt(mask, maskedoff, base, bindex, vl, 
VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei32_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei32_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei32_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
+//
+vfloat64m1_t test_vluxei64_v_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_f64m1_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+//
+vfloat64m2_t test_vluxei64_v_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_f64m2_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
+//
+vfloat64m4_t test_vluxei64_v_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_f64m4_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
+//
+vfloat64m8_t test_vluxei64_v_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxei64_v_f64m8_mt(mask, maskedoff, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
index 29c1122..6b2b190 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg.c
@@ -12335,537 +12335,3000 @@ void test_vluxseg2ei64_v_u64m4 (vuint64m4_t *v0, vuint64m4_t *v1, const uint64_t
   return vluxseg2ei64_v_u64m4(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl);
+void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }
@llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_f32m1(v0, v1, base, bindex, vl);
+void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t
*v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4(v0, v1, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); } -// 
CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: ret void -// -void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl); -} +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg3ei8_v_f16m2 
(vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 
0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, 
vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] 
= extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP6]], <vscale x 2 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP7]], <vscale x 2 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP8]], <vscale x 2 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i8.i64(float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i8.i64(float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.nxv4f32.nxv4i8.i64(float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.nxv8f32.nxv8i8.i64(float* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i16.i64(float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f32m1(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret
void
+//
+void test_vluxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f32m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f32m4(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> }
@llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * 
[[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
<vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
@@ -12873,15932 +15336,29427 @@ void test_vluxseg5ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t
 // CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6,
const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f64m1 
(vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_f64m2(v0, v1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> }
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , 
, , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
<vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f64m4(v0, v1, base, bindex, vl);
+}
+
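The masked variants that begin below are where this patch's new operand is visible: each @llvm.riscv.vluxseg*.mask.* call now ends in `i64 1`, the tail-agnostic policy value. A minimal sketch of one such call (hypothetical function name, illustrative only; assumes <riscv_vector.h>):

#include <riscv_vector.h>

// Masked two-field segment load. CodeGen appends the tail policy to the
// masked intrinsic, so this lowers to
// @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(..., i64 vl, i64 1),
// where the trailing i64 1 selects the tail-agnostic policy.
void gather_two_fields_m(vint8mf8_t *x, vint8mf8_t *y, vbool64_t m,
                         vint8mf8_t off0, vint8mf8_t off1,
                         const int8_t *base, vuint8mf8_t bindex, size_t vl) {
  vluxseg2ei8_v_i8mf8_m(x, y, m, off0, off1, base, bindex, vl);
}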
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void
test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, 
vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void 
+// +void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, 
vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { 
+ return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t 
bindex, size_t vl) {
+  return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m(
+void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m(
+void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m(
+void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m(
+void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m(
+void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m(
+void test_vluxseg8ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m(
+void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m(
+void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m(
+void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m(
+void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m(
+void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m(
+void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m(
+void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m(
+void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m(
+void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m(
+void test_vluxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m(
+void test_vluxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m(
+void test_vluxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m(
+void test_vluxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t 
*base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], <vscale x 4 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1,
vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...7 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V6]], then ret void...]
+//
+void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...8 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V7]], then ret void...]
+//
+void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...4 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V3]], then ret void...]
+//
+void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...5 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V4]], then ret void...]
+//
+void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...6 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V5]], then ret void...]
+//
+void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...7 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V6]], then ret void...]
+//
+void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...8 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V7]], then ret void...]
+//
+void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...4 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V3]], then ret void...]
+//
+void test_vluxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...5 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V4]], then ret void...]
+//
+void test_vluxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...6 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V5]], then ret void...]
+//
+void test_vluxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...7 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V6]], then ret void...]
+//
+void test_vluxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...8 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V7]], then ret void...]
+//
+void test_vluxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...4 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V3]], then ret void...]
+//
+void test_vluxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+  return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
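Every masked vluxseg test in this file follows the shape above: one @llvm.riscv.vluxsegN.mask call whose trailing i64 operand is the tail policy argument this patch appends during codegen, followed by one store per segment. As a minimal caller sketch (hypothetical helper; only the builtin and its signature are taken from the tests, and <riscv_vector.h> plus a V-enabled toolchain are assumed):

    #include <riscv_vector.h>

    // Hypothetical helper: masked indexed gather of interleaved i16 pairs.
    // Masked-off lanes take their values from fallback0/fallback1; the tail
    // policy operand (the trailing i64 in the CHECK lines) is supplied by
    // the compiler when lowering to @llvm.riscv.vluxseg2.mask.
    static void gather_pairs_m(vint16mf2_t *out0, vint16mf2_t *out1,
                               vbool32_t mask,
                               vint16mf2_t fallback0, vint16mf2_t fallback1,
                               const int16_t *base, vuint16mf2_t bindex,
                               size_t vl) {
      vluxseg2ei16_v_i16mf2_m(out0, out1, mask, fallback0, fallback1,
                              base, bindex, vl);
    }

The _m builtin's C signature is unchanged by this patch; only the generated IR gains the extra policy operand.
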
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...4 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V3]], then ret void...]
+//
+void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...5 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V4]], then ret void...]
+//
+void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...6 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V5]], then ret void...]
+//
+void test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...7 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V6]], then ret void...]
+//
+void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...8 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V7]], then ret void...]
+//
+void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...4 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V3]], then ret void...]
+//
+void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...5 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V4]], then ret void...]
+//
+void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...6 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V5]], then ret void...]
+//
+void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...7 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V6]], then ret void...]
+//
+void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...8 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V7]], then ret void...]
+//
+void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...2 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V1]], then ret void...]
+//
+void test_vluxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[...3 extractvalue/store CHECK-RV64-NEXT lines for [[V0]]-[[V2]], then ret void...]
+//
+void test_vluxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* 
[[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
= extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i32m1_m 
(vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 
0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return 
vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + 
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 
4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+// +void test_vluxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t 
bindex, size_t vl) { + return vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + 
return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t 
maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0,
vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); 
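+
+// NOTE: An illustrative usage sketch (added for exposition; the `demo_` name is
+// hypothetical and not part of the generated checks). Each masked vluxseg
+// builtin in this file takes the destination field pointers, then the mask,
+// the per-field maskedoff operands, and finally base/bindex/vl; with this
+// patch, the masked form lowers to an intrinsic call whose trailing `i64 1`
+// operand requests the tail-agnostic policy.
+void demo_vluxseg2ei64_v_i64m1_m (vint64m1_t *f0, vint64m1_t *f1, vbool64_t mask, vint64m1_t off0, vint64m1_t off1, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+  // Masked-off elements of each loaded field keep the corresponding maskedoff
+  // values; tail elements are handled tail-agnostically.
+  vluxseg2ei64_v_i64m1_m(f0, f1, mask, off0, off1, base, bindex, vl);
+}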
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg7ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store [[TMP8]], * [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg8ei8_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg5ei8_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg6ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg7ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg8ei8_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg2ei8_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg3ei8_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl) { + return vluxseg4ei8_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl) { + return vluxseg2ei8_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t 
*v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>*
[[V6:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], <vscale x 1 x i8> [[MASKEDOFF6:%.*]], <vscale x 1 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP6]], <vscale x 1 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP7]], <vscale x 1 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP8]], <vscale x 1 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl)
{ + return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, 
vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t 
maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return 
vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t 
*base, vuint16m2_t bindex, size_t vl) { + return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { + return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { + return vluxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t 
*base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf8_m 
(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf8_m 
(vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, 
vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, 
vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex,
size_t vl) { + return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, 
vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { + return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0,
vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return 
vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL:
@test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
+  return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 
0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg7ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: 
store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg8ei8_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl) { + return vluxseg3ei8_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) {
+  return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i16>, <vscale x 16 x i16> } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF0:%.*]], <vscale x 16 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP1]], <vscale x 16 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i16> [[TMP2]], <vscale x 16 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL:
@test_vluxseg4ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
[... analogous autogenerated CHECK blocks and test functions elided: vluxseg{4..8}ei16_v_u16mf2_m, vluxseg{2..8}ei16_v_u16m1_m, vluxseg{2..4}ei16_v_u16m2_m, vluxseg2ei16_v_u16m4_m, vluxseg{2..8}ei32_v_u16mf4_m, vluxseg{2..8}ei32_v_u16mf2_m, vluxseg{2..8}ei32_v_u16m1_m, vluxseg{2..4}ei32_v_u16m2_m, vluxseg2ei32_v_u16m4_m, and vluxseg{2..6}ei64_v_u16mf4_m; each follows the same pattern, with the underlying @llvm.riscv.vluxsegN.mask.* call gaining the trailing tail-policy operand i64 1 ...]
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base,
vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
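Every masked segment-load test in this family has the same shape: the `_m` builtin keeps its pre-existing C argument list (one out-pointer and one maskedoff per field, plus mask, base, index vector, and vl), and codegen appends the new tail-policy operand as a trailing constant `i64 1` on the `@llvm.riscv.vluxseg<N>.mask.*` call; in the policy encoding this series uses, 1 is tail agnostic and 0 is tail undisturbed. A minimal caller sketch reusing the `vluxseg4ei32_v_u16m2_m` intrinsic exercised above (the helper name and the record-gather framing are illustrative, not taken from the test suite):

#include <riscv_vector.h>

// Illustrative helper, not part of the patch: gather four u16 fields of a
// packed record via a masked indexed segment load. bindex holds per-lane
// byte offsets; inactive lanes take their values from the maskedoff
// operands, while lanes past vl are governed by the appended tail policy.
static void load_records(vuint16m2_t *f0, vuint16m2_t *f1, vuint16m2_t *f2,
                         vuint16m2_t *f3, vbool8_t mask, vuint16m2_t off0,
                         vuint16m2_t off1, vuint16m2_t off2, vuint16m2_t off3,
                         const uint16_t *base, vuint32m4_t bindex, size_t vl) {
  vluxseg4ei32_v_u16m2_m(f0, f1, f2, f3, mask, off0, off1, off2, off3,
                         base, bindex, vl);
}

Note that no tail-policy argument appears at the C level here; only the lowering changes, which is exactly what the trailing `i64 1` in each CHECK line pins down.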
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
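As a reference for what these CHECK blocks pin down: an N-field indexed segment load reads N consecutive elements starting `bindex[i]` bytes past `base` for each active lane `i < vl`, writes field k of lane i back through out-pointer `v<k>`, and leaves each inactive lane equal to the corresponding `maskedoff` element. A scalar model of the two-field case, under my reading of the RVV semantics (the helper and its array-based signature are illustrative, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Scalar model of a masked 2-field indexed segment load (vluxseg2ei64-style).
// bindex holds unsigned byte offsets; field k of lane i comes from
// base_bytes + bindex[i] + k * sizeof(uint16_t). Lanes at i >= vl are the
// "tail" and are governed by the tail policy, so they are not written here.
static void vluxseg2_model(uint16_t *v0, uint16_t *v1, const _Bool *mask,
                           const uint16_t *maskedoff0,
                           const uint16_t *maskedoff1,
                           const uint8_t *base_bytes, const uint64_t *bindex,
                           size_t vl) {
  for (size_t i = 0; i < vl; ++i) {
    if (mask[i]) {
      uint16_t f[2];
      memcpy(f, base_bytes + bindex[i], sizeof f);  // one 2-element segment
      v0[i] = f[0];
      v1[i] = f[1];
    } else {
      v0[i] = maskedoff0[i];  // inactive lanes keep the maskedoff value
      v1[i] = maskedoff1[i];
    }
  }
}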
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
+ return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+}
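The u32 variants in this stretch are the same template at SEW=32: segment values store back through the out-pointers with `align 4`, and the index vector's element width is independent of the data width, so a `vuint8mf8_t` vector of byte offsets can drive a `u32mf2` segment load because both types carry the same lane count (`nxv1`). A sketch using the `vluxseg2ei8_v_u32mf2_m` signature exercised above (the wrapper name is illustrative; build with a compiler that enables the V extension):

#include <riscv_vector.h>

// Illustrative wrapper, not part of the patch: a u32 segment load whose
// per-lane byte offsets fit in 8 bits. Data EEW (32) and index EEW (8)
// differ, but data and index types agree on the lane count.
static void load_u32_pairs(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask,
                           vuint32mf2_t off0, vuint32mf2_t off1,
                           const uint32_t *base, vuint8mf8_t bindex,
                           size_t vl) {
  vluxseg2ei8_v_u32mf2_m(v0, v1, mask, off0, off1, base, bindex, vl);
}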
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) {
+ return vluxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// 
CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, 
vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, 
vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t 
maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: 
store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32m2_m 
(vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
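// The masked segment-load builtins above share one calling convention: N
// output pointers, the mask, one maskedoff vector per field, the base
// pointer, the index vector, and vl. A minimal caller sketch (assumes
// <riscv_vector.h> with the v0.10 naming used in these tests; m, off0..off3,
// and idx are hypothetical values the caller sets up):
//
//   #include <riscv_vector.h>
//
//   void gather4(const uint64_t *base, vuint16mf4_t idx, vbool64_t m,
//                vuint64m1_t off0, vuint64m1_t off1, vuint64m1_t off2,
//                vuint64m1_t off3, size_t vl) {
//     vuint64m1_t f0, f1, f2, f3;
//     // Indexed load of 4 interleaved fields; masked-off elements keep off0..off3.
//     vluxseg4ei16_v_u64m1_m(&f0, &f1, &f2, &f3, m, off0, off1, off2, off3,
//                            base, idx, vl);
//   }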
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+//
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { + return 
vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void 
test_vluxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: 
ret void
+//
+void test_vluxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.nxv2f32.nxv2i16.i64(float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], <vscale x 2 x i64> [[MASKEDOFF2:%.*]], <vscale x 2 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
+  return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+}
+
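// Note on the IR checked above: every masked vluxseg call now carries a
// trailing `i64 1` operand, the tail policy this patch threads through to
// the intrinsics (1 selects tail agnostic; 0 would be tail undisturbed).
// A hedged sketch of the C-to-IR correspondence, reusing the builtin
// signature from the test above (d0/d1, off0/off1, and idx are illustrative
// names, not part of the patch):
//
//   vuint64m2_t d0, d1;
//   vluxseg2ei32_v_u64m2_m(&d0, &d1, mask, off0, off1, base, idx, vl);
//   // lowers to:
//   //   @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64(off0, off1, base,
//   //       idx, mask, vl, /*tail policy=*/1)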
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t
mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: 
@test_vluxseg7ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, 
vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { 
, , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m2(v0, v1, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i16.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f32m4(v0, v1, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32mf2(v0, v1, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32mf2(v0, v1, v2, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, 
vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1(v0, v1, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t 
maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * 
[[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, 
vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2(v0, v1, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4( 
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i32.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei32_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_f32m4(v0, v1, base, bindex, vl);
+void test_vluxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_f32mf2(v0, v1, base, bindex, vl);
+void test_vluxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_f32mf2(v0, v1, v2, base, bindex, vl);
+void test_vluxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+ return vluxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_f32mf2(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+ return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg5ei64_v_f32mf2(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+ return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg6ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg6ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg7ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f32.nxv1i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg2ei64_v_f32m1(v0, v1, base, bindex, vl);
+void test_vluxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg3ei64_v_f32m1(v0, v1, v2, base, bindex, vl);
+void test_vluxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg4ei64_v_f32m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg5ei64_v_f32m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg6ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg6ei64_v_f32m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+ return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg7ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f32.nxv2i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg8ei64_v_f32m1 (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, const float *base, vuint64m2_t bindex, size_t vl) {
- return vluxseg8ei64_v_f32m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg2ei64_v_f32m2(v0, v1, base, bindex, vl);
+void test_vluxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg3ei64_v_f32m2(v0, v1, v2, base, bindex, vl);
+void test_vluxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f32.nxv4i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei64_v_f32m2 (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, const float *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_v_f32m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f32.nxv8i64.i64(float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_f32m4 (vfloat32m4_t *v0, vfloat32m4_t *v1, const float *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_v_f32m4(v0, v1, base, bindex, vl);
+void test_vluxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_f64m1(v0, v1, base, bindex, vl);
+void test_vluxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+ return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_f64m1(v0, v1, v2, base, bindex, vl);
+void test_vluxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_f64m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg6ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg7ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+ return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]],
align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m2(v0, v1, base, bindex, vl); +void test_vluxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { + 
return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t 
*v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i8.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4(v0, v1, base, bindex, vl); +void test_vluxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { + return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } - -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, 
vuint16m4_t bindex, size_t vl) { + return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t 
maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg2ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2(v0, v1, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , 
, } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i16.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg2ei16_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4(v0, v1, base, bindex, vl); +// +void test_vluxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1(v0, v1, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t 
*v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 
// CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2(v0, v1, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t 
*v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f64.nxv4i32.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4(v0, v1, base, bindex, vl); +void test_vluxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { + return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { + return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t 
vl) { + return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f64.nxv1i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1 (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, 
vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2(v0, v1, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2(v0, v1, v2, base, bindex, vl); -} - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f64.nxv2i64.i64(double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +void test_vluxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_f64m2 (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, const double *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64_v_f64m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.nxv4f64.nxv4i64.i64(double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f64m4 (vfloat64m4_t *v0, vfloat64m4_t *v1, const double *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f64m4(v0, v1, base, bindex, vl);
+void test_vluxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], <vscale x 2 x half> [[MASKEDOFF2:%.*]], <vscale x 2 x half> [[MASKEDOFF3:%.*]], <vscale x 2 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP3]], <vscale x 2 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP4]], <vscale x 2 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP5]], <vscale x 2 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], <vscale x 1 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg4ei8_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg5ei8_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>*
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) { + return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8mf2_m (vint8mf2_t 
*v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) { + return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, 
vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, 
vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void 
test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t 
bindex, size_t vl) { - return vluxseg8ei8_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t 
vl) { - return vluxseg5ei8_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg6ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg7ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg8ei8_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg3ei8_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], <vscale x 16 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl) {
-  return vluxseg4ei8_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+//
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { + return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { + return vluxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t 
maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], 
* [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf4_m 
(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, 
vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, 
vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t 
maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t 
maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg3ei16_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
+  return vluxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>*
[[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) { + return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) { + return vluxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg6ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg7ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl) { - return 
vluxseg8ei16_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i8m2_m (vint8m2_t 
*v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i8m4_m (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_i8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, 
bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { + return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: 
@test_vluxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { + return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) { + return vluxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, 
vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_m 
(vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg4ei32_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg5ei32_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
+  return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    
[[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf4_m 
(vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { + return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t 
maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { + return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) { + return vluxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) 
{ + return vluxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } 
[[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg6ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +// +void test_vluxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret 
void // -void test_vluxseg7ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) { + return vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_i8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) { + return vluxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei32_v_i8m2_m (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg4ei32_v_i8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg2ei64_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) {
+ return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg3ei64_v_i8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) {
+ return vluxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl) {
- return vluxseg4ei64_v_i8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void
test_vluxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { + return vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { + return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t 
*base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { + return vluxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf4_m (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_i8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, 
maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } - -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 + +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_i8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_i8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], 
* [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { + return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8mf2_m (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_i8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { + return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { + return vluxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return 
vluxseg5ei64_v_i8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i8m1_m (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg8ei64_v_i8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, 
vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { + return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { + return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } 
@llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg8ei8_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) {
+ return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
}

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei8_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei8_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei8_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf8_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>*
[[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei8_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg8ei8_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg2ei8_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg3ei8_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg4ei8_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg5ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg5ei8_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg6ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg6ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg7ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], <vscale x 2 x i8> [[MASKEDOFF6:%.*]], <vscale x 2 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP7]], <vscale x 2 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP8]], <vscale x 2 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
-  return vluxseg7ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg8ei8_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], <vscale x 4 x i16> [[MASKEDOFF6:%.*]], <vscale x 4 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP6]], <vscale x 4 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP7]], <vscale x 4 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP8]], <vscale x 4 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei8_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4,
vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +void test_vluxseg5ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei8_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei8_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei8_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i8m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg2ei8_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_i16m1_m (vint16m1_t *v0,
vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg2ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, 
vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , 
} [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg6ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t 
maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, 
vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, 
vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, 
vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i8m1_mt(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, 
vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store 
<vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i8m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint16m4_t bindex, size_t vl,
uint8_t ta) { + return vluxseg3ei16_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , 
, , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_i8m4_mt (vint8m4_t *v0, vint8m4_t *v1, vbool2_t mask, vint8m4_t maskedoff0, vint8m4_t maskedoff1, const int8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i16m4_m (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_i16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t 
maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], <vscale x 1 x i16> [[MASKEDOFF4:%.*]], <vscale x 1 x i16> [[MASKEDOFF5:%.*]], <vscale x 1 x i16> [[MASKEDOFF6:%.*]], <vscale x 1 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], <vscale x 2 x i8> [[MASKEDOFF3:%.*]], <vscale x 2 x i8> [[MASKEDOFF4:%.*]], <vscale x 2 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP4]], <vscale x 2 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP5]], <vscale x 2 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP6]], <vscale x 2 x i8>* [[V5:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_i16mf4_m (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]],
[[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16mf2_m 
(vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_i16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 
2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16mf2_m (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_i16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } - -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP7]], <vscale x 4 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64_v_i16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei32_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei32_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
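All of the `_mt` tests in this file pass VE_TAIL_AGNOSTIC, so the policy operand in the IR is always `i64 1`. Keeping the tail values instead is the same call shape with a different trailing constant; a sketch, assuming tail undisturbed is encoded as 0 (the macro below is hypothetical, not defined in this test file), using the vluxseg2ei32_v_i8m1_mt builtin exercised in the next hunk:

#include <riscv_vector.h>
#include <stddef.h>

#define MY_TAIL_UNDISTURBED 0 // assumed encoding: 0 = tail undisturbed, 1 = tail agnostic

// Hypothetical caller: with a tail-undisturbed policy the tail elements of
// *v0/*v1 keep the values supplied in maskedoff0/maskedoff1 (the merge
// operands) rather than being left unspecified.
static void load_pair_tail_undisturbed(vint8m1_t *v0, vint8m1_t *v1,
                                       vbool8_t mask,
                                       vint8m1_t maskedoff0, vint8m1_t maskedoff1,
                                       const int8_t *base, vuint32m4_t bindex,
                                       size_t vl) {
  vluxseg2ei32_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1,
                         base, bindex, vl, MY_TAIL_UNDISTURBED);
}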
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64_v_i16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg5ei64_v_i16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
-//
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg6ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_i16m1_m (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_i16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] 
= extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return 
vluxseg3ei64_v_i16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i16m2_m (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_i16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i8m2_mt (vint8m2_t *v0, vint8m2_t *v1, vint8m2_t *v2, vint8m2_t *v3, vbool4_t mask, vint8m2_t maskedoff0, vint8m2_t maskedoff1, vint8m2_t maskedoff2, vint8m2_t maskedoff3, const int8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t 
maskedoff2, vint8mf8_t maskedoff3, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vbool64_t mask, vint8mf8_t 
maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf8_mt (vint8mf8_t *v0, vint8mf8_t *v1, vint8mf8_t *v2, vint8mf8_t *v3, vint8mf8_t *v4, vint8mf8_t *v5, vint8mf8_t *v6, vint8mf8_t *v7, vbool64_t mask, vint8mf8_t maskedoff0, vint8mf8_t maskedoff1, vint8mf8_t maskedoff2, vint8mf8_t maskedoff3, vint8mf8_t maskedoff4, vint8mf8_t maskedoff5, vint8mf8_t maskedoff6, vint8mf8_t maskedoff7, const int8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, const int8_t *base, 
vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_i8mf4_mt (vint8mf4_t *v0, vint8mf4_t *v1, vint8mf4_t *v2, vint8mf4_t *v3, vint8mf4_t *v4, vint8mf4_t *v5, vint8mf4_t *v6, vint8mf4_t *v7, vbool32_t mask, vint8mf4_t maskedoff0, vint8mf4_t maskedoff1, vint8mf4_t maskedoff2, vint8mf4_t maskedoff3, vint8mf4_t maskedoff4, vint8mf4_t maskedoff5, vint8mf4_t maskedoff6, vint8mf4_t maskedoff7, const int8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue 
{ , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i8mf2_mt(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 
1
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP8]], <vscale x 4 x i8>* [[V7:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg8ei64_v_i8mf2_mt (vint8mf2_t *v0, vint8mf2_t *v1, vint8mf2_t *v2, vint8mf2_t *v3, vint8mf2_t *v4, vint8mf2_t *v5, vint8mf2_t *v6, vint8mf2_t *v7, vbool16_t mask, vint8mf2_t maskedoff0, vint8mf2_t maskedoff1, vint8mf2_t maskedoff2, vint8mf2_t maskedoff3, vint8mf2_t maskedoff4, vint8mf2_t maskedoff5, vint8mf2_t maskedoff6, vint8mf2_t maskedoff7, const int8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_i8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_i8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_i8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_i8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i8m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store
[[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t 
*v2, vint8m1_t *v3, vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_i8m1_mt (vint8m1_t *v0, vint8m1_t *v1, vint8m1_t *v2, vint8m1_t *v3, 
vint8m1_t *v4, vint8m1_t *v5, vint8m1_t *v6, vint8m1_t *v7, vbool8_t mask, vint8m1_t maskedoff0, vint8m1_t maskedoff1, vint8m1_t maskedoff2, vint8m1_t maskedoff3, vint8m1_t maskedoff4, vint8m1_t maskedoff5, vint8m1_t maskedoff6, vint8m1_t maskedoff7, const int8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint8mf8_t 
bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , 
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return 
vluxseg2ei32_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg3ei32_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg4ei32_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i16m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t
maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, 
vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei8_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, 
vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, 
vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg5ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t 
maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, 
vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei16_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_i32mf2_m (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64
1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// 
CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg5ei64_v_i32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg6ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg7ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg8ei16_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_i32m1_m (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64_v_i32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64_v_i32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_i32m2_m (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64_v_i32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg5ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], <vscale x 4 x i16> [[MASKEDOFF4:%.*]], <vscale x 4 x i16> [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP5]], <vscale x 4 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:
store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i32m4_m (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_i32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, 
vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t 
maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg7ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
, , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, 
vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, 
maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, 
vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// 
CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
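
The maskedoff operands still supply the inactive lanes of a masked _mt call, so a caller that wants the unselected lanes preserved can feed the destination's previous contents back in while leaving the tail agnostic. A hedged sketch against the vluxseg2ei32_v_i16m1_mt signature exercised above; the wrapper name is illustrative only:

#include <riscv_vector.h>

/* Re-gather only the lanes selected by mask; unselected lanes keep the
   previous values of *v0/*v1, and the tail is left agnostic. */
void refresh_selected(vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask,
                      const int16_t *base, vuint32m2_t bindex, size_t vl) {
  vluxseg2ei32_v_i16m1_mt(v0, v1, mask, *v0, *v1, base, bindex, vl,
                          VE_TAIL_AGNOSTIC);
}
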
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_i16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store [[TMP5]], * [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t
maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, 
vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_i16m4_mt (vint16m4_t *v0, vint16m4_t *v1, vbool4_t mask, vint16m4_t maskedoff0, vint16m4_t maskedoff1, const int16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m1_m 
(vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + 
return vluxseg6ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_i64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_i16mf4_mt (vint16mf4_t *v0, vint16mf4_t *v1, vint16mf4_t *v2, vint16mf4_t *v3, vint16mf4_t *v4, vint16mf4_t *v5, vint16mf4_t *v6, vint16mf4_t *v7, vbool64_t mask, vint16mf4_t maskedoff0, vint16mf4_t maskedoff1, vint16mf4_t maskedoff2, vint16mf4_t maskedoff3, vint16mf4_t maskedoff4, vint16mf4_t maskedoff5, vint16mf4_t maskedoff6, vint16mf4_t maskedoff7, const int16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t 
-  return vluxseg7ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_i16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], <vscale x 1 x i64> [[MASKEDOFF2:%.*]], <vscale x 1 x i64> [[MASKEDOFF3:%.*]], <vscale x 1 x i64> [[MASKEDOFF4:%.*]], <vscale x 1 x i64> [[MASKEDOFF5:%.*]], <vscale x 1 x i64> [[MASKEDOFF6:%.*]], <vscale x 1 x i64> [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64>* [[V2:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64>* [[V3:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP5]], <vscale x 1 x i64>* [[V4:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP6]], <vscale x 1 x i64>* [[V5:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_i64m1_m (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_i64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6,
maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } 
@llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_i64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], 
align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_i64m2_m (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_i64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: 
store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_i64m4_m (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_i64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_i16mf2_mt (vint16mf2_t *v0, vint16mf2_t *v1, vint16mf2_t *v2, vint16mf2_t *v3, vint16mf2_t *v4, vint16mf2_t *v5, vint16mf2_t *v6, vint16mf2_t *v7, vbool32_t mask, vint16mf2_t maskedoff0, vint16mf2_t maskedoff1, vint16mf2_t maskedoff2, vint16mf2_t maskedoff3, vint16mf2_t maskedoff4, vint16mf2_t maskedoff5, vint16mf2_t maskedoff6, vint16mf2_t maskedoff7, const int16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i16m1_mt(v0, 
v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t maskedoff5, vint16m1_t maskedoff6, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i16m1_mt (vint16m1_t *v0, vint16m1_t *v1, vint16m1_t *v2, vint16m1_t *v3, vint16m1_t *v4, vint16m1_t *v5, vint16m1_t *v6, vint16m1_t *v7, vbool16_t mask, vint16m1_t maskedoff0, vint16m1_t maskedoff1, vint16m1_t maskedoff2, vint16m1_t maskedoff3, vint16m1_t maskedoff4, vint16m1_t 
maskedoff5, vint16m1_t maskedoff6, vint16m1_t maskedoff7, const int16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, 
vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i16m2_mt (vint16m2_t *v0, vint16m2_t *v1, vint16m2_t *v2, vint16m2_t *v3, vbool8_t mask, vint16m2_t maskedoff0, vint16m2_t maskedoff1, vint16m2_t maskedoff2, vint16m2_t maskedoff3, const int16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u8mf4_m(v0, v1, v2, v3, v4, 
v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u8mf4_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// 
[... the intervening hunks are of the same shape and are trimmed here: each remaining masked vluxseg{2-8}ei8/ei16 test for i32mf2, i32m1, i32m2, and i32m4 replaces a removed u8 "_m" test, is renamed to "_mt", gains a trailing "uint8_t ta" parameter that the test exercises with VE_TAIL_AGNOSTIC, and extends the expected @llvm.riscv.vluxsegN.mask call with the trailing tail-policy operand "i64 1" ...]
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei16_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: 
@test_vluxseg3ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { 
, , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], 
align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei16_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, 
vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, 
mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t 
maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t 
*v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t 
maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg5ei16_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* 
[[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF0:%.*]], <vscale x 8 x i8> [[MASKEDOFF1:%.*]], <vscale x 8 x i8> [[MASKEDOFF2:%.*]], <vscale x 8 x i8> [[MASKEDOFF3:%.*]], <vscale x 8 x i8> [[MASKEDOFF4:%.*]], <vscale x 8 x i8> [[MASKEDOFF5:%.*]], <vscale x 8 x i8> [[MASKEDOFF6:%.*]], <vscale x 8 x i8> [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP1]], <vscale x 8 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP2]], <vscale x 8 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP3]], <vscale x 8 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP4]], <vscale x 8 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP5]], <vscale x 8 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP6]], <vscale x 8 x i8>* [[V5:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP7]], <vscale x 8 x i8>* [[V6:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 8 x i8> [[TMP8]], <vscale x 8 x i8>* [[V7:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei32_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei32_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF0:%.*]], <vscale x 16 x i8> [[MASKEDOFF1:%.*]], <vscale x 16 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2,
vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg3ei16_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg4ei16_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u8m4_m (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl) { - return vluxseg2ei16_v_u8m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, 
vint32mf2_t *v3, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); 
+void test_vluxseg8ei64_v_i32mf2_mt (vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t *v2, vint32mf2_t *v3, vint32mf2_t *v4, vint32mf2_t *v5, vint32mf2_t *v6, vint32mf2_t *v7, vbool64_t mask, vint32mf2_t maskedoff0, vint32mf2_t maskedoff1, vint32mf2_t maskedoff2, vint32mf2_t maskedoff3, vint32mf2_t maskedoff4, vint32mf2_t maskedoff5, vint32mf2_t maskedoff6, vint32mf2_t maskedoff7, const int32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_i32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF0:%.*]], <vscale x 2 x i8> [[MASKEDOFF1:%.*]], <vscale x 2 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP1]], <vscale x 2 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP2]], <vscale x 2 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i8> [[TMP3]], <vscale x 2 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * 
[[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_i32m1_mt (vint32m1_t *v0, vint32m1_t *v1, vint32m1_t *v2, vint32m1_t *v3, vint32m1_t *v4, vint32m1_t *v5, vint32m1_t *v6, vint32m1_t *v7, vbool32_t mask, vint32m1_t maskedoff0, vint32m1_t maskedoff1, vint32m1_t maskedoff2, vint32m1_t maskedoff3, vint32m1_t maskedoff4, vint32m1_t maskedoff5, vint32m1_t maskedoff6, vint32m1_t maskedoff7, const int32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_i32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i32m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_i32m2_mt (vint32m2_t *v0, vint32m2_t *v1, vint32m2_t *v2, vint32m2_t *v3, vbool16_t mask, vint32m2_t maskedoff0, vint32m2_t maskedoff1, vint32m2_t maskedoff2, vint32m2_t maskedoff3, const int32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_i32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei64_v_i32m4_mt (vint32m4_t *v0, vint32m4_t *v1, vbool8_t mask, vint32m4_t maskedoff0, vint32m4_t maskedoff1, const int32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl) {
-  return
vluxseg6ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, 
vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg5ei32_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void 
test_vluxseg6ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg6ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg7ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, 
maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg8ei32_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u8m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg3ei32_v_u8m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, 
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u8m2_m (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg4ei32_v_u8m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , 
, } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf8_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, 
vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf8_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, 
base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf8_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei16_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg3ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint16mf2_t 
bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf8_m (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf8_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei16_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = 
extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: 
[[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf4_m (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u8mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_i64m2_mt 
(vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u8mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg4ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u8mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +// +void test_vluxseg4ei32_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_u8mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei32_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_i64m4_mt(v0, v1, mask, maskedoff0, 
maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP6]], <vscale x 4 x i8>* [[V5:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], <vscale x 4 x i8> [[MASKEDOFF5:%.*]], <vscale x 4 x i8> [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg7ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u8mf2_m (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg8ei64_v_u8mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_i64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl) 
{ - return vluxseg2ei64_v_u8m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_i64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg3ei64_v_u8m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg4ei64_v_u8m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_i64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// 
CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg5ei64_v_u8m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_i64m1_mt (vint64m1_t *v0, vint64m1_t *v1, vint64m1_t *v2, vint64m1_t *v3, vint64m1_t *v4, vint64m1_t *v5, vint64m1_t *v6, vint64m1_t *v7, vbool64_t mask, vint64m1_t maskedoff0, vint64m1_t maskedoff1, vint64m1_t maskedoff2, vint64m1_t maskedoff3, vint64m1_t maskedoff4, vint64m1_t maskedoff5, vint64m1_t maskedoff6, vint64m1_t maskedoff7, const int64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_i64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg6ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_i64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg7ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_i64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_i64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } 
[[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP4]], <vscale x 2 x i64>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_u8m1_m (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u8m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei64_v_i64m2_mt (vint64m2_t *v0, vint64m2_t *v1, vint64m2_t *v2, vint64m2_t *v3, vbool32_t mask, vint64m2_t maskedoff0, vint64m2_t maskedoff1, vint64m2_t maskedoff2, vint64m2_t maskedoff3, const int64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_i64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_i64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i64>, <vscale x 4 x i64> } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF0:%.*]], <vscale x 4 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP1]], <vscale x 4 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i64> [[TMP2]], <vscale x 4 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_i64m4_mt (vint64m4_t *v0, vint64m4_t *v1, vbool16_t mask, vint64m4_t maskedoff0, vint64m4_t maskedoff1, const int64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_i64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF0:%.*]], <vscale x 1 x i16> [[MASKEDOFF1:%.*]], <vscale x 1 x i16> [[MASKEDOFF2:%.*]], <vscale x 1 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP1]], <vscale x 1 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP2]], <vscale x 1 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei8_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
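[Editorial aside: the blocks above fix the shape of the new masked segment-load API. Each `_mt` builtin takes the tail policy as its last argument and lowers it to a trailing `i64` operand on the matching @llvm.riscv.vluxsegN.mask intrinsic call, with `i64 1` corresponding to VE_TAIL_AGNOSTIC in the checks above. A minimal usage sketch follows, assuming only what these tests already declare (the `_mt` builtin and the VE_TAIL_AGNOSTIC constant from riscv_vector.h); the wrapper name is illustrative, not part of the patch.]

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Masked, indexed two-field segment load with an explicit tail policy.
// Inactive elements take their values from maskedoff0/maskedoff1; with
// VE_TAIL_AGNOSTIC the tail elements (indices >= vl) may hold any value
// afterwards, which appears in the IR as the trailing `i64 1` operand.
static void load_pairs_tail_agnostic(vuint8mf8_t *even, vuint8mf8_t *odd,
                                     vbool64_t mask,
                                     vuint8mf8_t maskedoff0,
                                     vuint8mf8_t maskedoff1,
                                     const uint8_t *base,
                                     vuint8mf8_t bindex, size_t vl) {
  // Compiles to @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8.i64 as checked above.
  vluxseg2ei8_v_u8mf8_mt(even, odd, mask, maskedoff0, maskedoff1,
                         base, bindex, vl, VE_TAIL_AGNOSTIC);
}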
[The rest of this hunk reached this document with the same extraction damage as above: the line breaks and the `<vscale x N x iM>` vector types of the FileCheck lines were stripped. Its content is the identical mechanical rewrite applied to the remaining masked vluxseg tests, with only the element, mask, and index types varying as the test names indicate. The remaining tail-less tests (test_vluxseg{5..8}ei8_v_u16mf4_m, test_vluxseg{2..8}ei8_v_u16mf2_m, test_vluxseg{2..8}ei8_v_u16m1_m, and test_vluxseg{2..4}ei8_v_u16m2_m) are removed, and the corresponding tail-policy tests (test_vluxseg{4..8}ei8_v_u8mf8_mt, test_vluxseg{2..8}ei8_v_u8mf4_mt, test_vluxseg{2..8}ei8_v_u8mf2_mt, and test_vluxseg{2..3}ei8_v_u8m1_mt) are added. Each new test appends a `uint8_t ta` parameter, passes VE_TAIL_AGNOSTIC as the builtin's final argument, and its CHECK lines expect the extra `i64 1` tail-policy operand on the matching @llvm.riscv.vluxseg{2..8}.mask intrinsic call, exactly as in the blocks reconstructed above. The hunk, and this pattern, continue below.]
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, 
vuint8m1_t maskedoff3, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei8_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, 
maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } 
@llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint8m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// 
CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, 
vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i16.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue 
{ , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei16_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_u16m1_m(v0, v1, v2, v3, v4, v5, 
v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF0:%.*]], <vscale x 8 x i16> [[MASKEDOFF1:%.*]], <vscale x 8 x i16> [[MASKEDOFF2:%.*]], <vscale x 8 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x i16> [[TMP4]], <vscale x 8 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF0:%.*]], <vscale x 4 x i8> [[MASKEDOFF1:%.*]], <vscale x 4 x i8> [[MASKEDOFF2:%.*]], <vscale x 4 x i8> [[MASKEDOFF3:%.*]], <vscale x 4 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP1]], <vscale x 4 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP2]], <vscale x 4 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP3]], <vscale x 4 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP4]], <vscale x 4 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 4 x i8> [[TMP5]], <vscale x 4 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg5ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4,
const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// 
CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const 
uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , 
, , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 16 x i8> [[TMP4]], <vscale x 16 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg6ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei16_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u8m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 32 x i8>, <vscale x 32 x i8> } @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF0:%.*]], <vscale x 32 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP1]], <vscale x 32 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 32 x i8> [[TMP2]], <vscale x 32 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg7ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg2ei16_v_u8m4_mt (vuint8m4_t *v0, vuint8m4_t *v1, vbool2_t mask, vuint8m4_t maskedoff0, vuint8m4_t maskedoff1, const uint8_t *base, vuint16m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u8m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], <vscale x 2 x i16> [[MASKEDOFF2:%.*]], <vscale x 2 x i16> [[MASKEDOFF3:%.*]], <vscale x 2 x i16> [[MASKEDOFF4:%.*]], <vscale x 2 x i16> [[MASKEDOFF5:%.*]], <vscale x 2 x i16> [[MASKEDOFF6:%.*]], <vscale x 2 x i16> [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP3]], <vscale x 2 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP4]], <vscale x 2 x i16>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP5]], <vscale x 2 x i16>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP6]], <vscale x 2 x i16>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP7]], <vscale x 2 x i16>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP8]], <vscale x 2 x i16>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei32_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg8ei32_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf8_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF0:%.*]], <vscale x 4 x i16> [[MASKEDOFF1:%.*]], <vscale x 4 x i16> [[MASKEDOFF2:%.*]], <vscale x 4 x i16> [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP1]], <vscale x 4 x i16>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP2]], <vscale x 4 x i16>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP3]], <vscale x 4 x i16>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x i16> [[TMP4]], <vscale x 4 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF0:%.*]], <vscale x 1 x i8> [[MASKEDOFF1:%.*]], <vscale x 1 x i8> [[MASKEDOFF2:%.*]], <vscale x 1 x i8> [[MASKEDOFF3:%.*]], <vscale x 1 x i8> [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP1]], <vscale x 1 x i8>* [[V0:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP2]], <vscale x 1 x i8>* [[V1:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP3]], <vscale x 1 x i8>* [[V2:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP4]], <vscale x 1 x i8>* [[V3:%.*]], align 1
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i8> [[TMP5]], <vscale x 1 x i8>* [[V4:%.*]], align 1
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg5ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl,
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, 
vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t 
maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u16m4_m (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_u16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, 
vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } 
[[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } 
[[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u16mf4_m (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t 
maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei32_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei32_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg5ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei32_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_u16mf2_m (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u8m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei32_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei32_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u8m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u8m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_u8m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei32_v_u8m2_mt (vuint8m2_t *v0, vuint8m2_t *v1, vuint8m2_t *v2, vuint8m2_t *v3, vbool4_t mask, vuint8m2_t maskedoff0, vuint8m2_t maskedoff1, vuint8m2_t maskedoff2, vuint8m2_t maskedoff3, const uint8_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u8m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u8mf8_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u8mf8_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_u16m1_m (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u8mf8_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_u16m2_m (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei64_v_u8mf8_mt (vuint8mf8_t *v0, vuint8mf8_t *v1, vuint8mf8_t *v2, vuint8mf8_t *v3, vuint8mf8_t *v4, vuint8mf8_t *v5, vuint8mf8_t *v6, vuint8mf8_t *v7, vbool64_t mask, vuint8mf8_t maskedoff0, vuint8mf8_t maskedoff1, vuint8mf8_t maskedoff2, vuint8mf8_t maskedoff3, vuint8mf8_t maskedoff4, vuint8mf8_t maskedoff5, vuint8mf8_t maskedoff6, vuint8mf8_t maskedoff7, const uint8_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u8mf8_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u8mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u8mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u8mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg6ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg5ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg7ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg6ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg7ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei64_v_u8mf4_mt (vuint8mf4_t *v0, vuint8mf4_t *v1, vuint8mf4_t *v2, vuint8mf4_t *v3, vuint8mf4_t *v4, vuint8mf4_t *v5, vuint8mf4_t *v6, vuint8mf4_t *v7, vbool32_t mask, vuint8mf4_t maskedoff0, vuint8mf4_t maskedoff1, vuint8mf4_t maskedoff2, vuint8mf4_t maskedoff3, vuint8mf4_t maskedoff4, vuint8mf4_t maskedoff5, vuint8mf4_t maskedoff6, vuint8mf4_t maskedoff7, const uint8_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u8mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg3ei8_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u8mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u8mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vbool16_t mask, vuint8mf2_t maskedoff0, 
vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u8mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, 
vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei64_v_u8mf2_mt (vuint8mf2_t *v0, vuint8mf2_t *v1, vuint8mf2_t *v2, vuint8mf2_t *v3, vuint8mf2_t *v4, vuint8mf2_t *v5, vuint8mf2_t *v6, vuint8mf2_t *v7, vbool16_t mask, vuint8mf2_t maskedoff0, vuint8mf2_t maskedoff1, vuint8mf2_t maskedoff2, vuint8mf2_t maskedoff3, vuint8mf2_t maskedoff4, vuint8mf2_t maskedoff5, vuint8mf2_t maskedoff6, vuint8mf2_t maskedoff7, const uint8_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_u8mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_m( +// 
CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_u8m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, 
const uint32_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u8m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u8m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_u8m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); 
+void test_vluxseg6ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_u8m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg7ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_u8m1_mt(v0, v1, v2, v3, 
v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 1 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 1 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg8ei64_v_u8m1_mt (vuint8m1_t *v0, vuint8m1_t *v1, vuint8m1_t *v2, vuint8m1_t *v3, vuint8m1_t *v4, vuint8m1_t *v5, vuint8m1_t *v6, vuint8m1_t *v7, vbool8_t mask, vuint8m1_t maskedoff0, vuint8m1_t maskedoff1, vuint8m1_t maskedoff2, vuint8m1_t maskedoff3, vuint8m1_t maskedoff4, vuint8m1_t maskedoff5, vuint8m1_t maskedoff6, vuint8m1_t maskedoff7, const uint8_t *base, vuint64m8_t bindex, size_t vl, uint8_t 
ta) { + return vluxseg8ei64_v_u8m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// 
CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// 
CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, 
maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP3]], <vscale x 1 x i16>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP4]], <vscale x 1 x i16>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP5]], <vscale x 1 x i16>* [[V4:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP6]], <vscale x 1 x i16>* [[V5:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP7]], <vscale x 1 x i16>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i16> [[TMP8]], <vscale x 1 x i16>* [[V7:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei8_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF0:%.*]], <vscale x 2 x i16> [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP1]], <vscale x 2 x i16>* [[V0:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i16> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i16> [[TMP2]], <vscale x 2 x i16>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t
*v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16mf2_mt( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue 
{ , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei8_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store 
[[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const 
uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei8_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei8_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei8_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, 
vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } 
@llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store 
[[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_u32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_u32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint16mf2_t 
bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_u32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_u32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg7ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// 
CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg8ei16_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32mf2_m (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_u32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_u32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, 
vl); +void test_vluxseg7ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, 
vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei16_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { 
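+ // The trailing `ta` parameter is the tail-policy argument carried by the
+ // masked (`_mt`) builtins. These generated tests always pass
+ // VE_TAIL_AGNOSTIC, which is why each masked intrinsic call in the CHECK
+ // lines ends with a trailing `i64 1` operand; a caller that needs the tail
+ // elements preserved would presumably pass the corresponding
+ // tail-undisturbed policy value instead, lowering to `i64 0`.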
+ return vluxseg2ei16_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_u32m1_m (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_u32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], 
[[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei16_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_u32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t 
*base, vuint16m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u32m2_m (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_u32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u32m4_m (vuint32m4_t *v0, vuint32m4_t *v1, 
vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl) { - return vluxseg2ei64_v_u32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, 
vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return 
vluxseg7ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + 
return vluxseg3ei32_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } 
[[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , 
} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei32_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * 
[[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: 
@test_vluxseg4ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// 
CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = 
extractvalue {{.*}} [[TMP0]], 5
-// CHECK-RV64:         store {{.*}}, align 8
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         store {{.*}}, align 2
 // CHECK-RV64:         ret void
 //
-void test_vluxseg8ei16_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg7ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei32_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei32_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg3ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg4ei16_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u16m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei16_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei32_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u16m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u16m4_mt (vuint16m4_t *v0, vuint16m4_t *v1, vbool4_t mask, vuint16m4_t maskedoff0, vuint16m4_t maskedoff1, const uint16_t *base, vuint32m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg3ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg4ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg5ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg5.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg6ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg6ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg5ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg6.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         ret void
+//
+void test_vluxseg6ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg7ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg7ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv1i16.nxv1i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg8ei32_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg8ei32_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u16mf4_mt (vuint16mf4_t *v0, vuint16mf4_t *v1, vuint16mf4_t *v2, vuint16mf4_t *v3, vuint16mf4_t *v4, vuint16mf4_t *v5, vuint16mf4_t *v6, vuint16mf4_t *v7, vbool64_t mask, vuint16mf4_t maskedoff0, vuint16mf4_t maskedoff1, vuint16mf4_t maskedoff2, vuint16mf4_t maskedoff3, vuint16mf4_t maskedoff4, vuint16mf4_t maskedoff5, vuint16mf4_t maskedoff6, vuint16mf4_t maskedoff7, const uint16_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg3ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg3ei32_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg4ei32_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl) {
-  return vluxseg4ei32_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg5.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei32_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg5ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg6.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg6ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg3ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg7ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv2i16.nxv2i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg4ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg4ei64_v_u64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei64_v_u16mf2_mt (vuint16mf2_t *v0, vuint16mf2_t *v1, vuint16mf2_t *v2, vuint16mf2_t *v3, vuint16mf2_t *v4, vuint16mf2_t *v5, vuint16mf2_t *v6, vuint16mf2_t *v7, vbool32_t mask, vuint16mf2_t maskedoff0, vuint16mf2_t maskedoff1, vuint16mf2_t maskedoff2, vuint16mf2_t maskedoff3, vuint16mf2_t maskedoff4, vuint16mf2_t maskedoff5, vuint16mf2_t maskedoff6, vuint16mf2_t maskedoff7, const uint16_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg5ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg5ei64_v_u64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg6ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg6ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg3ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg7ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg7ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg4ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64:         ret void
+//
+void test_vluxseg5ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei64_v_u16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg8ei64_v_u64m1_m (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_u64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg7.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg2ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_u64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u16m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64({{.*}}, i64 [[VL:%.*]])
+// CHECK-RV64:         call {{.*}} @llvm.riscv.vluxseg8.mask.nxv4i16.nxv4i64.i64({{.*}}, i64 [[VL:%.*]], i64 1)
 // CHECK-RV64:         ret void
 //
-void test_vluxseg3ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_u64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei64_v_u16m1_mt (vuint16m1_t *v0, vuint16m1_t *v1, vuint16m1_t *v2, vuint16m1_t *v3, vuint16m1_t *v4, vuint16m1_t *v5, vuint16m1_t *v6, vuint16m1_t *v7, vbool16_t mask, vuint16m1_t maskedoff0, vuint16m1_t maskedoff1, vuint16m1_t maskedoff2, vuint16m1_t maskedoff3, vuint16m1_t maskedoff4, vuint16m1_t maskedoff5, vuint16m1_t maskedoff6, vuint16m1_t maskedoff7, const uint16_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl,
VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_u64m2_m (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_u64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_u16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_u64m4_m (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl) { - return 
vluxseg2ei64_v_u64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg2ei8_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_u16m2_mt (vuint16m2_t *v0, vuint16m2_t *v1, vuint16m2_t *v2, vuint16m2_t *v3, vbool8_t mask, vuint16m2_t maskedoff0, vuint16m2_t maskedoff1, vuint16m2_t maskedoff2, vuint16m2_t maskedoff3, const uint16_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i8.i64( 
[[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], 
[[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t 
maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 
2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
} [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store 
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg6ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg7ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei8_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei8_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg3ei16_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg4ei16_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg5ei16_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg6ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], <vscale x 1 x float> [[MASKEDOFF5:%.*]], <vscale x 1 x float> [[MASKEDOFF6:%.*]], <vscale x 1 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP6]], <vscale x 1 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP7]], <vscale x 1 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP8]], <vscale x 1 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], <vscale x 1 x i32> [[MASKEDOFF5:%.*]], <vscale x 1 x i32> [[MASKEDOFF6:%.*]], <vscale x 1 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei16_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg3ei16_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg4ei16_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg5ei16_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg6ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg6ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg7ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg7ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 7
-// CHECK-RV64-NEXT: store <vscale x 2 x float> [[TMP8]], <vscale x 2 x float>* [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], <vscale x 2 x i32> [[MASKEDOFF4:%.*]], <vscale x 2 x i32> [[MASKEDOFF5:%.*]], <vscale x 2 x i32> [[MASKEDOFF6:%.*]], <vscale x 2 x i32> [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP5]], <vscale x 2 x i32>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 5
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP6]], <vscale x 2 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP7]], <vscale x 2 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT: store <vscale x 2 x i32> [[TMP8]], <vscale x 2 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei16_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl) {
-  return vluxseg8ei16_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei16_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg2ei16_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg3ei16_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF0:%.*]], <vscale x 4 x float> [[MASKEDOFF1:%.*]], <vscale x 4 x float> [[MASKEDOFF2:%.*]], <vscale x 4 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP1]], <vscale x 4 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP2]], <vscale x 4 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP3]], <vscale x 4 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 4 x float> [[TMP4]], <vscale x 4 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF0:%.*]], <vscale x 4 x i32> [[MASKEDOFF1:%.*]], <vscale x 4 x i32> [[MASKEDOFF2:%.*]], <vscale x 4 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei16_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl) {
-  return vluxseg4ei16_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei16_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei16_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl) {
-  return vluxseg2ei16_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei16_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint16m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg2ei32_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg3ei32_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei32_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg4ei32_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg4ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], <vscale x 1 x float> [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 4
-// CHECK-RV64-NEXT: store <vscale x 1 x float> [[TMP5]], <vscale x 1 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], <vscale x 1 x i32> [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP4]], <vscale x 1 x i32>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 4
+// CHECK-RV64-NEXT: store <vscale x 1 x i32> [[TMP5]], <vscale x 1 x i32>* [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl) {
-  return vluxseg5ei32_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg5ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, 
v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// 
CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, 
vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u32m1_mt(v0, 
v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vluxseg7ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t 
maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t 
maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } 
@llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t 
maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei32_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei32_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t vl) {
-  return vluxseg2ei32_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei32_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint32m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei32_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f32mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP3]], <vscale x 1 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f32mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32mf2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[MASKEDOFF0:%.*]], <vscale x 1 x float> [[MASKEDOFF1:%.*]], <vscale x 1 x float> [[MASKEDOFF2:%.*]], <vscale x 1 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP1]], <vscale x 1 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP2]], <vscale x 1 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP3]], <vscale x 1 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x float> [[TMP4]], <vscale x 1 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF0:%.*]], <vscale x 1 x i32> [[MASKEDOFF1:%.*]], <vscale x 1 x i32> [[MASKEDOFF2:%.*]], <vscale x 1 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store
[[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f32mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl) { - return 
vluxseg5ei64_v_f32mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32mf2_mt 
(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, 
vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// 
CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP6]], <vscale x 1 x i32>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP7]], <vscale x 1 x i32>* [[V6:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP8]], <vscale x 1 x i32>* [[V7:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
-  return vluxseg8ei64_v_f32mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei64_v_u32mf2_mt (vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t *v2, vuint32mf2_t *v3, vuint32mf2_t *v4, vuint32mf2_t *v5, vuint32mf2_t *v6, vuint32mf2_t *v7, vbool64_t mask, vuint32mf2_t maskedoff0, vuint32mf2_t maskedoff1, vuint32mf2_t maskedoff2, vuint32mf2_t maskedoff3, vuint32mf2_t maskedoff4, vuint32mf2_t maskedoff5, vuint32mf2_t maskedoff6, vuint32mf2_t maskedoff7, const uint32_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_u32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f32m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f32m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg3ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_u32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF0:%.*]], <vscale x 2 x i32> [[MASKEDOFF1:%.*]], <vscale x 2 x i32> [[MASKEDOFF2:%.*]], <vscale x 2 x i32> [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP1]], <vscale x 2 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP2]], <vscale x 2 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP3]], <vscale x 2 x i32>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x i32> [[TMP4]], <vscale x 2 x i32>*
[[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f32m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t 
maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_u32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg6ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f32m1_m (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, 
vuint32m1_t *v5, vuint32m1_t *v6, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f32m1_m 
(vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f32m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u32m1_mt (vuint32m1_t *v0, vuint32m1_t *v1, vuint32m1_t *v2, vuint32m1_t *v3, vuint32m1_t *v4, vuint32m1_t *v5, vuint32m1_t *v6, vuint32m1_t *v7, vbool32_t mask, vuint32m1_t maskedoff0, vuint32m1_t maskedoff1, vuint32m1_t maskedoff2, vuint32m1_t maskedoff3, vuint32m1_t maskedoff4, vuint32m1_t maskedoff5, vuint32m1_t maskedoff6, vuint32m1_t maskedoff7, const uint32_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_u32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f32m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_u32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: 
[[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f32m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f32m2_m (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f32m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); 
+void test_vluxseg4ei64_v_u32m2_mt (vuint32m2_t *v0, vuint32m2_t *v1, vuint32m2_t *v2, vuint32m2_t *v3, vbool16_t mask, vuint32m2_t maskedoff0, vuint32m2_t maskedoff1, vuint32m2_t maskedoff2, vuint32m2_t maskedoff3, const uint32_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_u32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u32m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x float>, <vscale x 8 x float> } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64(<vscale x 8 x float> [[MASKEDOFF0:%.*]], <vscale x 8 x float> [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP1]], <vscale x 8 x float>* [[V0:%.*]], align 4
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x float>, <vscale x 8 x float> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x float> [[TMP2]], <vscale x 8 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x i32>, <vscale x 8 x i32> } @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF0:%.*]], <vscale x 8 x i32> [[MASKEDOFF1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP1]], <vscale x 8 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 8 x i32> [[TMP2]], <vscale x 8 x i32>* [[V1:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f32m4_m (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f32m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei64_v_u32m4_mt (vuint32m4_t *v0, vuint32m4_t *v1, vbool8_t mask, vuint32m4_t maskedoff0, vuint32m4_t maskedoff1, const uint32_t *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_u32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i64>, <vscale x 1 x i64> } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF0:%.*]], <vscale x 1 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP1]], <vscale x 1 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0,
vuint64m1_t maskedoff1, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 
8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, 
vuint8mf8_t bindex, size_t vl) { - return vluxseg5ei8_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei8_v_u64m1_mt 
(vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const 
double *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// 
CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP7]], <vscale x 1 x i64>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x i64> [[TMP8]], <vscale x 1 x i64>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei8_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg8ei8_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg8ei8_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF0:%.*]], <vscale x 2 x i64> [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg2ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_u64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg3ei8_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t 
*v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei8_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei8_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t 
mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * 
[[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t 
maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // 
-void test_vluxseg7ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , 
, , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei16_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u64m2_mt(v0, v1, mask, 
maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], 
align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei16_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei16_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], 
* [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store 
[[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } 
[[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei32_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, 
vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei32_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei32_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_u64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_m( +// CHECK-RV64-LABEL: 
@test_vluxseg4ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , 
, , } @llvm.riscv.vluxseg5.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f64m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg5ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_u64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg6ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg7ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_u64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// 
CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f64m1_m (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f64m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg8ei64_v_u64m1_mt (vuint64m1_t *v0, vuint64m1_t *v1, vuint64m1_t *v2, vuint64m1_t *v3, vuint64m1_t *v4, vuint64m1_t *v5, vuint64m1_t *v6, vuint64m1_t *v7, vbool64_t mask, vuint64m1_t maskedoff0, vuint64m1_t maskedoff1, vuint64m1_t maskedoff2, vuint64m1_t maskedoff3, vuint64m1_t maskedoff4, vuint64m1_t maskedoff5, vuint64m1_t maskedoff6, vuint64m1_t maskedoff7, const uint64_t *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_u64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_u64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f64m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg3ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_u64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_u64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f64m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg4ei64_v_u64m2_mt (vuint64m2_t *v0, vuint64m2_t *v1, vuint64m2_t *v2, vuint64m2_t *v3, vbool32_t mask, vuint64m2_t maskedoff0, vuint64m2_t maskedoff1, vuint64m2_t maskedoff2, vuint64m2_t maskedoff3, const uint64_t *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_u64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_u64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f64m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg2ei64_v_u64m4_mt (vuint64m4_t *v0, vuint64m4_t *v1, vbool16_t mask, vuint64m4_t maskedoff0, vuint64m4_t maskedoff1, const uint64_t *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + 
return vluxseg2ei64_v_u64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg2ei8_v_f16mf4(v0, v1, base, bindex, vl);
+void test_vluxseg2ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -28807,13 +44765,13 @@ void test_vluxseg2ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Flo
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg3ei8_v_f16mf4(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
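// NOTE (usage sketch, not part of this patch's generated tests): the _mt
// builtins above append one argument, the tail policy, after vl. The CHECK
// lines confirm it is forwarded as the final i64 operand of the masked
// intrinsic: i64 1 selects tail agnostic (VE_TAIL_AGNOSTIC); 0 would select
// tail undisturbed. The wrapper name gather_f16_pairs is illustrative only
// and assumes <riscv_vector.h> is included, as these test files do; the
// intrinsic name, argument order, and policy macro are exactly those
// exercised by test_vluxseg2ei8_v_f16mf4_mt above.
void gather_f16_pairs(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask,
                      vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1,
                      const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
  // Inactive lanes keep maskedoff0/maskedoff1; lanes past vl may take any
  // value because this call requests the tail-agnostic policy.
  vluxseg2ei8_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1,
                          base, bindex, vl, VE_TAIL_AGNOSTIC);
}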
 
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -28824,13 +44782,13 @@ void test_vluxseg3ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg4ei8_v_f16mf4(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg4ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -28843,13 +44801,13 @@ void test_vluxseg4ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
-  return vluxseg5ei8_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg5ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
 // CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -28864,13 +44822,13 @@ void test_vluxseg5ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf
 // CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -28887,13 +44845,13 @@ void test_vluxseg6ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = 
extractvalue { , , , , , , , } [[TMP0]], 1 @@ -28912,26 +44870,26 @@ void test_vluxseg7ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg2ei8_v_f16mf2(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -28940,13 +44898,13 @@ void test_vluxseg2ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Flo // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return 
vluxseg3ei8_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -28957,13 +44915,13 @@ void test_vluxseg3ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg4ei8_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -28976,13 +44934,13 @@ void test_vluxseg4ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl, 
uint8_t ta) { + return vluxseg5ei8_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -28997,13 +44955,13 @@ void test_vluxseg5ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29020,13 +44978,13 @@ void test_vluxseg6ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, 
vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29045,26 +45003,26 @@ void test_vluxseg7ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei8_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return 
vluxseg2ei8_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29073,13 +45031,13 @@ void test_vluxseg2ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float1 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29090,13 +45048,13 @@ void test_vluxseg3ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29109,13 +45067,13 @@ void test_vluxseg4ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei8_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29130,13 +45088,13 @@ void test_vluxseg5ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei8_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], 
[[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29153,13 +45111,13 @@ void test_vluxseg6ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei8_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29178,26 +45136,26 @@ void test_vluxseg7ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei8_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei8_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29206,13 +45164,13 @@ void test_vluxseg2ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float1 // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei8_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29223,39 +45181,39 @@ void test_vluxseg3ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, 
vfloat16m2_t *v3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei8_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei8_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4( +// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i8.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4(v0, v1, base, bindex, vl); +void test_vluxseg2ei8_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei8_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29264,13 +45222,13 @@ void test_vluxseg2ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29281,13 +45239,13 @@ void test_vluxseg3ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29300,13 +45258,13 @@ void test_vluxseg4ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29321,13 +45279,13 @@ void test_vluxseg5ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29344,13 +45302,13 @@ void test_vluxseg6ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store 
[[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29369,26 +45327,26 @@ void test_vluxseg7ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei16_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29397,13 +45355,13 @@ void test_vluxseg2ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29414,13 +45372,13 @@ void test_vluxseg3ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, 
vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29433,13 +45391,13 @@ void test_vluxseg4ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29454,13 +45412,13 @@ void test_vluxseg5ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, 
vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29477,13 +45435,13 @@ void test_vluxseg6ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29502,26 +45460,26 @@ void test_vluxseg7ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, 
bindex, vl); +void test_vluxseg8ei16_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29530,13 +45488,13 @@ void test_vluxseg2ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -29547,13 +45505,13 @@ void test_vluxseg3ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg4ei16_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg4ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
@@ -29566,13 +45524,13 @@ void test_vluxseg4ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t
// CHECK-RV64-NEXT: store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
- return vluxseg5ei16_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg5ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei16_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]],
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29587,13 +45545,13 @@ void test_vluxseg5ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29610,13 +45568,13 @@ void test_vluxseg6ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29635,26 +45593,26 @@ void test_vluxseg7ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei16_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } 
[[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29663,13 +45621,13 @@ void test_vluxseg2ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29680,39 +45638,39 @@ void test_vluxseg3ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei16_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i16.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4(v0, v1, base, bindex, vl); +void test_vluxseg2ei16_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t 
maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16mf4(v0, v1, base, bindex, vl);
+void test_vluxseg2ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei32_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
@@ -29721,13 +45679,13 @@ void test_vluxseg2ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl
// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
// CHECK-RV64-NEXT: ret void
//
-void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16mf4(v0, v1, v2, base, bindex, vl);
+void test_vluxseg3ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei32_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
}
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_mt(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0 //
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29738,13 +45696,13 @@ void test_vluxseg3ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29757,13 +45715,13 @@ void test_vluxseg4ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29778,13 +45736,13 @@ void test_vluxseg5ei32_v_f16mf4 
(vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29801,13 +45759,13 @@ void test_vluxseg6ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29826,26 +45784,26 @@ void test_vluxseg7ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg2ei32_v_f16mf2(v0, v1, base, bindex, vl); +void test_vluxseg2ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29854,13 +45812,13 @@ void test_vluxseg2ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // 
CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg3ei32_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -29871,13 +45829,13 @@ void test_vluxseg3ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg4ei32_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -29890,13 +45848,13 @@ void test_vluxseg4ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg5ei32_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, 
vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -29911,13 +45869,13 @@ void test_vluxseg5ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg6ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -29934,13 +45892,13 @@ void test_vluxseg6ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg7ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei32_v_f16mf2_mt (vfloat16mf2_t *v0, 
vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -29959,26 +45917,26 @@ void test_vluxseg7ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint32m1_t bindex, size_t vl) { - return vluxseg8ei32_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei32_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m1(v0, v1, base, bindex, vl); 
+void test_vluxseg2ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -29987,13 +45945,13 @@ void test_vluxseg2ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30004,13 +45962,13 @@ void test_vluxseg3ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m1(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } 
@llvm.riscv.vluxseg5.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30023,13 +45981,13 @@ void test_vluxseg4ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg5ei32_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30044,13 +46002,13 @@ void test_vluxseg5ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg6ei32_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30067,13 +46025,13 @@ void test_vluxseg6ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg7ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30092,26 +46050,26 @@ void test_vluxseg7ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei32_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint32m2_t bindex, size_t vl) { - return vluxseg8ei32_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei32_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, 
maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m2(v0, v1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -30120,13 +46078,13 @@ void test_vluxseg2ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg3ei32_v_f16m2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } 
[[TMP0]], 1 @@ -30137,39 +46095,39 @@ void test_vluxseg3ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei32_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint32m4_t bindex, size_t vl) { - return vluxseg4ei32_v_f16m2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei32_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv16f16.nxv16i32.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei32_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, vuint32m8_t bindex, size_t vl) { - return vluxseg2ei32_v_f16m4(v0, v1, base, bindex, vl); +void test_vluxseg2ei32_v_f16m4_mt (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f16m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4(v0, v1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f16mf4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , , } @llvm.riscv.vluxseg3.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -30178,13 +46136,13 @@ void test_vluxseg2ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f16mf4_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30195,13 +46153,13 @@ void test_vluxseg3ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f16mf4_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30213,14 +46171,14 @@ void test_vluxseg4ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void -// -void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4(v0, v1, v2, v3, v4, base, bindex, vl); +// +void test_vluxseg5ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30235,13 +46193,13 @@ void test_vluxseg5ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30258,13 +46216,13 @@ void test_vluxseg6ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv1f16.nxv1i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30283,26 +46241,26 @@ void test_vluxseg7ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4 (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf4_mt (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_f16mf4_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call { , } @llvm.riscv.vluxseg2.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2(v0, v1, base, bindex, vl); +void test_vluxseg2ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f16mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -30311,13 +46269,13 @@ void test_vluxseg2ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, const _Fl // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f16mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 @@ -30328,13 +46286,13 @@ void test_vluxseg3ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2(v0, v1, v2, v3, base, bindex, vl); +void test_vluxseg4ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f16mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 @@ -30347,13 +46305,13 @@ void test_vluxseg4ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2(v0, v1, v2, v3, v4, base, bindex, vl); +void test_vluxseg5ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 @@ -30368,13 +46326,13 @@ void test_vluxseg5ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2(v0, v1, 
v2, v3, v4, v5, base, bindex, vl); +void test_vluxseg6ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 @@ -30391,13 +46349,13 @@ void test_vluxseg6ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl); +void test_vluxseg7ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv2f16.nxv2i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 @@ -30416,26 +46374,26 @@ void test_vluxseg7ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16m // CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 // CHECK-RV64-NEXT: 
ret void // -void test_vluxseg8ei64_v_f16mf2 (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl); +void test_vluxseg8ei64_v_f16mf2_mt (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_f16mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 // CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1(v0, v1, base, bindex, vl); +void test_vluxseg2ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f16m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 // CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 // CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 @@ -30444,1971 +46402,2217 @@ void test_vluxseg2ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, const _Float // CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1(v0, v1, v2, base, bindex, vl); +void test_vluxseg3ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, 
const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f16m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg4ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f16m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_f16m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * 
[[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg6ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], 
[[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg8ei64_v_f16m1_mt (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_f16m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg2ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f16m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// 
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_f16m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei64_v_f16m2_mt (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_f16m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg2ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg3ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg4ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg5ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg6ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg7ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
+// CHECK-RV64-NEXT: ret void
+//
+void test_vluxseg8ei8_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg4ei64_v_f16m1(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg2ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg5ei64_v_f16m1(v0, v1, v2, v3, v4, base, bindex, vl);
+void test_vluxseg3ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg6ei64_v_f16m1(v0, v1, v2, v3, v4, v5, base, bindex, vl);
+void test_vluxseg4ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg7ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, base, bindex, vl);
+void test_vluxseg5ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei8_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.nxv4f16.nxv4i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei64_v_f16m1 (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
- return vluxseg8ei64_v_f16m1(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
+void test_vluxseg6ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg2ei64_v_f16m2(v0, v1, base, bindex, vl);
+void test_vluxseg7ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg3ei64_v_f16m2(v0, v1, v2, base, bindex, vl);
+void test_vluxseg8ei8_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei8_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.nxv8f16.nxv8i64.i64(half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei64_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
- return vluxseg4ei64_v_f16m2(v0, v1, v2, v3, base, bindex, vl);
+void test_vluxseg2ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg2ei8_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei8_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg3ei8_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei8_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei8_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg4ei8_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei8_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint8m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei8_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg5ei8_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg2ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei16_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg6ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg3ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei16_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg7ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg4ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei16_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei8_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
- return vluxseg8ei8_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg5ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg5ei16_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg2ei8_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg6ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei16_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg3ei8_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg7ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei16_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
- return vluxseg4ei8_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg8ei16_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei16_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg5ei8_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg2ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg6ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg3ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store 
[[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg7ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg4ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store 
[[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg5ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei16_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint8mf4_t bindex, size_t vl) { - return vluxseg8ei8_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei16_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], 
align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei16_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// 
CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei16_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei16_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t 
maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg5ei8_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei16_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , 
, } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg6ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei16_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg7ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei16_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint16m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei8_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint8mf2_t bindex, size_t vl) { - return vluxseg8ei8_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg3ei8_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = 
extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei8_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint8m1_t bindex, size_t vl) { - return vluxseg4ei8_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei8_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint8m2_t bindex, size_t vl) { - return vluxseg2ei8_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg6ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, 
bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: ret void +// +void test_vluxseg7ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: 
[[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei32_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m1_mt( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], 
* [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint16mf4_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg5ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * 
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg7ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg6ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg6ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], <vscale x 1 x half> [[MASKEDOFF2:%.*]], <vscale x 1 x half> [[MASKEDOFF3:%.*]], <vscale x 1 x half> [[MASKEDOFF4:%.*]], <vscale x 1 x half> [[MASKEDOFF5:%.*]], <vscale x 1 x half> [[MASKEDOFF6:%.*]], <vscale x 1 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP2]], <vscale x 1 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP3]], <vscale x 1 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP4]], <vscale x 1 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP5]], <vscale x 1 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP6]], <vscale x 1 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP7]], <vscale x 1 x half>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 1 x half> [[TMP8]], <vscale x 1 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP7]], <vscale x 2 x float>* [[V6:%.*]], align 4
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei16_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
-  return vluxseg8ei16_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg7ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
 
-// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f32m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[MASKEDOFF0:%.*]], <vscale x 2 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP1]], <vscale x 2 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, <vscale x 2 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 2 x half> [[TMP2]], <vscale x 2 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[MASKEDOFF0:%.*]], <vscale x 2 x float> [[MASKEDOFF1:%.*]], <vscale x 2 x float> [[MASKEDOFF2:%.*]], <vscale x 2 x float> [[MASKEDOFF3:%.*]], <vscale x 2 x float> [[MASKEDOFF4:%.*]], <vscale x 2 x float> [[MASKEDOFF5:%.*]], <vscale x 2 x float> [[MASKEDOFF6:%.*]], <vscale x 2 x float> [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP1]], <vscale x 2 x float>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP2]], <vscale x 2 x float>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP3]], <vscale x 2 x float>* [[V2:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP4]], <vscale x 2 x float>* [[V3:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP5]], <vscale x 2 x float>* [[V4:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 2 x float> [[TMP6]], <vscale x 2 x float>* [[V5:%.*]], align 4
+// CHECK-RV64-NEXT:
[[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg8ei32_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = 
extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg3ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f32m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg5ei16_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg4ei32_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f32m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg6ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg2ei32_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint32m4_t bindex, size_t 
vl, uint8_t ta) { + return vluxseg2ei32_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg7ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f32mf2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: 
[[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint16mf2_t bindex, size_t vl) { - return vluxseg8ei16_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f32mf2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: 
store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f32mf2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, const float *base, vuint64m1_t bindex, size_t vl, 
uint8_t ta) { + return vluxseg5ei64_v_f32mf2_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg5ei16_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg7ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// 
CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg6ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg8ei64_v_f32mf2_mt (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei64_v_f32mf2_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg7ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f32m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , 
, , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei16_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint16m1_t bindex, size_t vl) { - return vluxseg8ei16_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg3ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f32m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = 
extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f32m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg3ei16_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg5ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_f32m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } 
@llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint16m2_t bindex, size_t vl) { - return vluxseg4ei16_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg6ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f32m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], 
[[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint16m4_t bindex, size_t vl) { - return vluxseg2ei16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei64_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f32m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , 
, , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg8ei64_v_f32m1_mt (vfloat32m1_t *v0, vfloat32m1_t *v1, vfloat32m1_t *v2, vfloat32m1_t *v3, vfloat32m1_t *v4, vfloat32m1_t *v5, vfloat32m1_t *v6, vfloat32m1_t *v7, vbool32_t mask, vfloat32m1_t maskedoff0, vfloat32m1_t maskedoff1, vfloat32m1_t maskedoff2, vfloat32m1_t maskedoff3, vfloat32m1_t maskedoff4, vfloat32m1_t maskedoff5, vfloat32m1_t maskedoff6, vfloat32m1_t maskedoff7, const float *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei64_v_f32m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei64_v_f32m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg4ei32_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg3ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei64_v_f32m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg5ei32_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg4ei64_v_f32m2_mt (vfloat32m2_t *v0, vfloat32m2_t *v1, vfloat32m2_t *v2, vfloat32m2_t *v3, vbool16_t mask, vfloat32m2_t maskedoff0, vfloat32m2_t maskedoff1, vfloat32m2_t maskedoff2, vfloat32m2_t maskedoff3, const float *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei64_v_f32m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 4
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 4
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg6ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg2ei64_v_f32m4_mt (vfloat32m4_t *v0, vfloat32m4_t *v1, vbool8_t mask, vfloat32m4_t maskedoff0, vfloat32m4_t maskedoff1, const float *base, vuint64m8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei64_v_f32m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg7ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg2ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei8_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
- return vluxseg8ei32_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg3ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei8_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei8_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg5ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei8_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg4ei32_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg6ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg5ei32_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg7ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei8_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg6ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg8ei8_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint8mf8_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei8_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
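// A minimal usage sketch (not part of the generated FileCheck output above):
// how a caller selects a tail policy with the new trailing argument. The
// tests in this file always pass VE_TAIL_AGNOSTIC; the complementary
// tail-undisturbed constant is assumed here to be named VE_TAIL_UNDISTURBED.
#include <riscv_vector.h>

void demo_tail_policy(vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask,
                      vfloat64m1_t off0, vfloat64m1_t off1,
                      const double *base, vuint8mf8_t bindex, size_t vl) {
  // Tail elements may hold any value after the masked segment load.
  vluxseg2ei8_v_f64m1_mt(v0, v1, mask, off0, off1, base, bindex, vl,
                         VE_TAIL_AGNOSTIC);
  // Assumed constant name: tail elements keep the maskedoff values instead.
  vluxseg2ei8_v_f64m1_mt(v0, v1, mask, off0, off1, base, bindex, vl,
                         VE_TAIL_UNDISTURBED);
}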
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg7ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg2ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei8_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei8_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
- return vluxseg8ei32_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg3ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei8_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei8_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg4ei8_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint8mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei8_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei8_v_f64m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg2ei8_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint8mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei8_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg4ei32_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg5ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg5ei32_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl);
+void test_vluxseg3ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg6ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg6ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg4ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg4ei16_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg5ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg7ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg7ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg5ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg5ei16_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg6ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
-// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
-// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
-// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
-// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
-// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg8ei32_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
- return vluxseg8ei32_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg6ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg6ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg7ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg7ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg7ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei16_v_f64m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0
-// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1
-// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2
-// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0
+// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1
+// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2
+// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3
+// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4
+// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5
+// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6
+// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7
+// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg3ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg3ei32_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg8ei16_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint16mf4_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg8ei16_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
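// A minimal sketch (not part of the generated checks): the tail policy is an
// ordinary integer argument, so a wrapper can forward it at run time. In the
// IR checked above, VE_TAIL_AGNOSTIC surfaces as the trailing `i64 1` operand
// on the masked vluxseg intrinsic calls.
#include <riscv_vector.h>

static inline void load_seg2_f64m1(vfloat64m1_t *v0, vfloat64m1_t *v1,
                                   vbool64_t mask, vfloat64m1_t off0,
                                   vfloat64m1_t off1, const double *base,
                                   vuint16mf4_t bindex, size_t vl,
                                   uint8_t ta) {
  // `ta` follows the same `uint8_t ta` parameter convention as the tests
  // in this file.
  vluxseg2ei16_v_f64m1_mt(v0, v1, mask, off0, off1, base, bindex, vl, ta);
}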
@llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg4ei32_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
- return vluxseg4ei32_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg2ei16_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei16_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[MASKEDOFF0:%.*]], <vscale x 16 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP1]], <vscale x 16 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT: store <vscale x 16 x half> [[TMP2]], <vscale x 16 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT: store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT: ret void
 //
-void test_vluxseg2ei32_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
- return vluxseg2ei32_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) {
+ return vluxseg3ei16_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei16_v_f64m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { <vscale x 1 x half>, <vscale x 1 x half> } @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[MASKEDOFF0:%.*]], <vscale x 1 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT: store <vscale x 1 x half> [[TMP1]], <vscale x 1 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { 
, } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg4ei16_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint16mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei16_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei16_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf4_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg2ei16_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint16m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei16_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf4_m( +// 
CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf4_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// 
CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf4_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, 
maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, const _Float16 *base, vuint64m1_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg5ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, 
vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei32_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf4_m (vfloat16mf4_t *v0, vfloat16mf4_t *v1, vfloat16mf4_t *v2, vfloat16mf4_t *v3, vfloat16mf4_t *v4, vfloat16mf4_t *v5, vfloat16mf4_t *v6, vfloat16mf4_t *v7, vbool64_t mask, vfloat16mf4_t maskedoff0, vfloat16mf4_t maskedoff1, vfloat16mf4_t maskedoff2, vfloat16mf4_t maskedoff3, vfloat16mf4_t maskedoff4, vfloat16mf4_t maskedoff5, vfloat16mf4_t maskedoff6, vfloat16mf4_t maskedoff7, const _Float16 *base, vuint64m1_t 
bindex, size_t vl) { - return vluxseg8ei64_v_f16mf4_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg6ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg2ei64_v_f16mf2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg7ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg7ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// 
CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg8ei32_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 +// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 +// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg3ei64_v_f16mf2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg8ei32_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint32mf2_t bindex, size_t vl, uint8_t ta) { + return vluxseg8ei32_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg4ei64_v_f16mf2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg2ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16mf2_m (vfloat16mf2_t 
*v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg5ei64_v_f16mf2_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg3ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei32_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei32_v_f64m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg6ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg6ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl); +void test_vluxseg4ei32_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, 
vfloat64m2_t maskedoff3, const double *base, vuint32m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei32_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_f64m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg7ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg7ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl); +void test_vluxseg2ei32_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint32m2_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei32_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , , } @llvm.riscv.vluxseg8.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , , , 
} [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP7:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 6 -// CHECK-RV64-NEXT: store [[TMP7]], * [[V6:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP8:%.*]] = extractvalue { , , , , , , , } [[TMP0]], 7 -// CHECK-RV64-NEXT: store [[TMP8]], * [[V7:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat16mf2_t *v2, vfloat16mf2_t *v3, vfloat16mf2_t *v4, vfloat16mf2_t *v5, vfloat16mf2_t *v6, vfloat16mf2_t *v7, vbool32_t mask, vfloat16mf2_t maskedoff0, vfloat16mf2_t maskedoff1, vfloat16mf2_t maskedoff2, vfloat16mf2_t maskedoff3, vfloat16mf2_t maskedoff4, vfloat16mf2_t maskedoff5, vfloat16mf2_t maskedoff6, vfloat16mf2_t maskedoff7, const _Float16 *base, vuint64m2_t bindex, size_t vl) { - return vluxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl); +void test_vluxseg2ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg2ei64_v_f64m1_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , } @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * 
[[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg2ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg2ei64_v_f16m1_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl); +void test_vluxseg3ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg3ei64_v_f64m1_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , } @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg3ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg3ei64_v_f16m1_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl); +void test_vluxseg4ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg4ei64_v_f64m1_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , } @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = extractvalue { , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg4ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg4ei64_v_f16m1_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl); +void test_vluxseg5ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg5ei64_v_f64m1_mt(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg5ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , } @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], 
[[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 +// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 +// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 +// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 +// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 +// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 8 +// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 +// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 8 // CHECK-RV64-NEXT: ret void // -void test_vluxseg5ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, const _Float16 *base, vuint64m4_t bindex, size_t vl) { - return vluxseg5ei64_v_f16m1_m(v0, v1, v2, v3, v4, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, base, bindex, vl); +void test_vluxseg6ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) { + return vluxseg6ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vluxseg6ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f64m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , } @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: [[TMP1:%.*]] = extractvalue { , , , , , } [[TMP0]], 0 -// CHECK-RV64-NEXT: store [[TMP1]], * [[V0:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { , , , , , } [[TMP0]], 1 -// CHECK-RV64-NEXT: store [[TMP2]], * [[V1:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { , , , , , } [[TMP0]], 2 -// CHECK-RV64-NEXT: store [[TMP3]], * [[V2:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { , , , , , } [[TMP0]], 3 -// CHECK-RV64-NEXT: store [[TMP4]], * [[V3:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { , , , , , } [[TMP0]], 4 -// CHECK-RV64-NEXT: store [[TMP5]], * [[V4:%.*]], align 2 -// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { , , , , , } [[TMP0]], 5 -// CHECK-RV64-NEXT: store [[TMP6]], * [[V5:%.*]], align 2 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call { , , , , , , } @llvm.riscv.vluxseg7.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF0:%.*]], [[MASKEDOFF1:%.*]], [[MASKEDOFF2:%.*]], [[MASKEDOFF3:%.*]], [[MASKEDOFF4:%.*]], [[MASKEDOFF5:%.*]], [[MASKEDOFF6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg6ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg6ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, base, bindex, vl);
+void test_vluxseg7ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg7ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg7ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f64m1_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg7.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } @llvm.riscv.vluxseg8.mask.nxv1f64.nxv1i64.i64(<vscale x 1 x double> [[MASKEDOFF0:%.*]], <vscale x 1 x double> [[MASKEDOFF1:%.*]], <vscale x 1 x double> [[MASKEDOFF2:%.*]], <vscale x 1 x double> [[MASKEDOFF3:%.*]], <vscale x 1 x double> [[MASKEDOFF4:%.*]], <vscale x 1 x double> [[MASKEDOFF5:%.*]], <vscale x 1 x double> [[MASKEDOFF6:%.*]], <vscale x 1 x double> [[MASKEDOFF7:%.*]], double* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP1]], <vscale x 1 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP2]], <vscale x 1 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP3]], <vscale x 1 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP4]], <vscale x 1 x double>* [[V3:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 4
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP5]], <vscale x 1 x double>* [[V4:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 5
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP6]], <vscale x 1 x double>* [[V5:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 6
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP7]], <vscale x 1 x double>* [[V6:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double> } [[TMP0]], 7
+// CHECK-RV64-NEXT:    store <vscale x 1 x double> [[TMP8]], <vscale x 1 x double>* [[V7:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg7ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg7ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, base, bindex, vl);
+void test_vluxseg8ei64_v_f64m1_mt (vfloat64m1_t *v0, vfloat64m1_t *v1, vfloat64m1_t *v2, vfloat64m1_t *v3, vfloat64m1_t *v4, vfloat64m1_t *v5, vfloat64m1_t *v6, vfloat64m1_t *v7, vbool64_t mask, vfloat64m1_t maskedoff0, vfloat64m1_t maskedoff1, vfloat64m1_t maskedoff2, vfloat64m1_t maskedoff3, vfloat64m1_t maskedoff4, vfloat64m1_t maskedoff5, vfloat64m1_t maskedoff6, vfloat64m1_t maskedoff7, const double *base, vuint64m1_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg8ei64_v_f64m1_mt(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg8ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } @llvm.riscv.vluxseg8.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[MASKEDOFF0:%.*]], <vscale x 4 x half> [[MASKEDOFF1:%.*]], <vscale x 4 x half> [[MASKEDOFF2:%.*]], <vscale x 4 x half> [[MASKEDOFF3:%.*]], <vscale x 4 x half> [[MASKEDOFF4:%.*]], <vscale x 4 x half> [[MASKEDOFF5:%.*]], <vscale x 4 x half> [[MASKEDOFF6:%.*]], <vscale x 4 x half> [[MASKEDOFF7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP1]], <vscale x 4 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP2]], <vscale x 4 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP3]], <vscale x 4 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP4]], <vscale x 4 x half>* [[V3:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP5:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 4
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP5]], <vscale x 4 x half>* [[V4:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 5
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP6]], <vscale x 4 x half>* [[V5:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 6
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP7]], <vscale x 4 x half>* [[V6:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half> } [[TMP0]], 7
-// CHECK-RV64-NEXT:    store <vscale x 4 x half> [[TMP8]], <vscale x 4 x half>* [[V7:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg8ei64_v_f16m1_m (vfloat16m1_t *v0, vfloat16m1_t *v1, vfloat16m1_t *v2, vfloat16m1_t *v3, vfloat16m1_t *v4, vfloat16m1_t *v5, vfloat16m1_t *v6, vfloat16m1_t *v7, vbool16_t mask, vfloat16m1_t maskedoff0, vfloat16m1_t maskedoff1, vfloat16m1_t maskedoff2, vfloat16m1_t maskedoff3, vfloat16m1_t maskedoff4, vfloat16m1_t maskedoff5, vfloat16m1_t maskedoff6, vfloat16m1_t maskedoff7, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
-  return vluxseg8ei64_v_f16m1_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
+void test_vluxseg2ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_f64m2_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg2ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg2ei64_v_f16m2_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
+void test_vluxseg3ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg3ei64_v_f64m2_mt(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg3ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f64m2_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.riscv.vluxseg4.mask.nxv2f64.nxv2i64.i64(<vscale x 2 x double> [[MASKEDOFF0:%.*]], <vscale x 2 x double> [[MASKEDOFF1:%.*]], <vscale x 2 x double> [[MASKEDOFF2:%.*]], <vscale x 2 x double> [[MASKEDOFF3:%.*]], double* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP2]], <vscale x 2 x double>* [[V1:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP3]], <vscale x 2 x double>* [[V2:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } [[TMP0]], 3
+// CHECK-RV64-NEXT:    store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[V3:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg3ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg3ei64_v_f16m2_m(v0, v1, v2, mask, maskedoff0, maskedoff1, maskedoff2, base, bindex, vl);
+void test_vluxseg4ei64_v_f64m2_mt (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_t *v2, vfloat64m2_t *v3, vbool32_t mask, vfloat64m2_t maskedoff0, vfloat64m2_t maskedoff1, vfloat64m2_t maskedoff2, vfloat64m2_t maskedoff3, const double *base, vuint64m2_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg4ei64_v_f64m2_mt(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vluxseg4ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vluxseg2ei64_v_f64m4_mt(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[MASKEDOFF0:%.*]], <vscale x 8 x half> [[MASKEDOFF1:%.*]], <vscale x 8 x half> [[MASKEDOFF2:%.*]], <vscale x 8 x half> [[MASKEDOFF3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 0
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP1]], <vscale x 8 x half>* [[V0:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 1
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP2]], <vscale x 8 x half>* [[V1:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 2
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP3]], <vscale x 8 x half>* [[V2:%.*]], align 2
-// CHECK-RV64-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } [[TMP0]], 3
-// CHECK-RV64-NEXT:    store <vscale x 8 x half> [[TMP4]], <vscale x 8 x half>* [[V3:%.*]], align 2
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 4 x double>, <vscale x 4 x double> } @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64.i64(<vscale x 4 x double> [[MASKEDOFF0:%.*]], <vscale x 4 x double> [[MASKEDOFF1:%.*]], double* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP1]], <vscale x 4 x double>* [[V0:%.*]], align 8
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x double>, <vscale x 4 x double> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 4 x double> [[TMP2]], <vscale x 4 x double>* [[V1:%.*]], align 8
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vluxseg4ei64_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t *v2, vfloat16m2_t *v3, vbool8_t mask, vfloat16m2_t maskedoff0, vfloat16m2_t maskedoff1, vfloat16m2_t maskedoff2, vfloat16m2_t maskedoff3, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
-  return vluxseg4ei64_v_f16m2_m(v0, v1, v2, v3, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, base, bindex, vl);
+void test_vluxseg2ei64_v_f64m4_mt (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, vuint64m4_t bindex, size_t vl, uint8_t ta) {
+  return vluxseg2ei64_v_f64m4_mt(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl, VE_TAIL_AGNOSTIC);
 }
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
index 903a3a4..1b5c7b1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmax.c
@@ -792,799 +792,1591 @@ vuint64m8_t test_vmaxu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmaxu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmaxu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vmaxu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vmaxu_vx_u64m8(op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmax_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vmax_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vmax_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmax.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmax_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+vint8mf8_t test_vmax_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmax_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vmax_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmax_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmax.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmax_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+vint8mf4_t test_vmax_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmax_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vmax_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmax_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmax.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmax_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+vint8mf2_t test_vmax_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmax_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+vint8m1_t test_vmax_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmax_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmax.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmax_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+vint8m1_t test_vmax_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmax_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+vint8m2_t test_vmax_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmax_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmax.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmax_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+vint8m2_t test_vmax_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmax_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+vint8m4_t test_vmax_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmax_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmax.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmax_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+vint8m4_t test_vmax_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
   return vmax_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vmax_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call
@llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vmax_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vmax_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmax_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vmax_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vmax_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vmax_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vmax_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmax_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vmax_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vmax_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vmax_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vmax_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmax_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vmax_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vmax_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vmax_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vmax_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmax_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vmax_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vmax_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vmax_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vmax_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmax_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vmax_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vmax_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vmax_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vmax_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vmax_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmax_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vmax_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vmax_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vmax_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vmax_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmax_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vmax_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmax_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vmax_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return 
vmax_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmax_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vmax_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmax_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vmax_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmax_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmax_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vmax_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmax_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vmax_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmax_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmax_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vmax_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, 
vint32m2_t op1, int32_t op2, size_t vl) { return vmax_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vmax_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmax_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmax_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vmax_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmax_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vmax_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmax_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmax_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vmax_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmax_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vmax_vv_i64m1_m 
(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmax_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmax_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vmax_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmax_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vmax_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmax_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmax_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vmax_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmax_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vmax_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmax_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmax_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, 
size_t vl) { +vint64m4_t test_vmax_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmax_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vmax_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmax_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmax_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmax_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vmax_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmax_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vmaxu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmaxu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmaxu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vmaxu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vv_u8mf4_m(vbool32_t mask, 
vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vmaxu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmaxu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmaxu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vmaxu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vmaxu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmaxu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmaxu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vmaxu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vmaxu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmaxu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmaxu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vmaxu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vmaxu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmaxu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmaxu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vmaxu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vmaxu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmaxu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmaxu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vmaxu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vmaxu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmaxu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmaxu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vmaxu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmaxu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vuint16mf4_t test_vmaxu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmaxu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmaxu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint16mf4_t test_vmaxu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vuint16mf2_t test_vmaxu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmaxu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmaxu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint16mf2_t test_vmaxu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vuint16m1_t test_vmaxu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmaxu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmaxu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vmaxu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vuint16m2_t test_vmaxu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmaxu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmaxu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vmaxu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vuint16m4_t test_vmaxu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmaxu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmaxu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vmaxu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vuint16m8_t test_vmaxu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmaxu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmaxu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vmaxu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmaxu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vuint32mf2_t test_vmaxu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmaxu_vv_u32mf2_m(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmaxu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint32mf2_t test_vmaxu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmaxu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vuint32m1_t test_vmaxu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmaxu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmaxu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vmaxu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmaxu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vuint32m2_t test_vmaxu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmaxu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmaxu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vmaxu_vx_u32m2_m 
(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmaxu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vuint32m4_t test_vmaxu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmaxu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmaxu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vmaxu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmaxu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vuint32m8_t test_vmaxu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmaxu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmaxu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vmaxu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmaxu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vv_u64m1_m(vbool64_t 
mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vuint64m1_t test_vmaxu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmaxu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmaxu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vmaxu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmaxu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vuint64m2_t test_vmaxu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmaxu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmaxu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vmaxu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmaxu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmaxu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vuint64m4_t test_vmaxu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmaxu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vmaxu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vmaxu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmaxu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vmaxu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vmaxu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmaxu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vmaxu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vmaxu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmaxu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
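The _mt tests that follow exercise the new explicit tail-policy builtins. As a minimal usage sketch (not part of the generated tests, and assuming riscv_vector.h also provides a companion VE_TAIL_UNDISTURBED constant alongside the VE_TAIL_AGNOSTIC used below), a caller that needs the elements past vl preserved from maskedoff could pass the policy explicitly:

#include <riscv_vector.h>

// Hypothetical caller, not from this patch: a masked max that keeps the
// tail elements of `maskedoff` instead of leaving them unspecified. The
// plain _m builtins now hardcode the agnostic policy (the trailing `i64 1`
// in the checks above), so an explicit choice requires the _mt form.
// VE_TAIL_UNDISTURBED is assumed here; the tests below only pass
// VE_TAIL_AGNOSTIC.
vint8m1_t max_keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                        vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vmax_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}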
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmax_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmax_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmax_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmax_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmax_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmax_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmax_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmax_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmax_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmax_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmax_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmax_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmax_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vmax_vv_i8m4_mt(mask, maskedoff, op1, op2,
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmax_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmax_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmax_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmax_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmax_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmax_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmax_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmax_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmax_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmax_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmax_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, 
vint32m4_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmax_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmax_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmax_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmax_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vmax_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmax_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vmax_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmax_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmax.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmax_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmax_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmaxu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmaxu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmaxu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmaxu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmaxu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmaxu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmaxu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmaxu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmaxu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmaxu_vx_u16mf2_mt (vbool32_t mask, 
vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmaxu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmaxu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmaxu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + 
return vmaxu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmaxu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmaxu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmaxu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmaxu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmaxu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmaxu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmaxu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vmaxu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmaxu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vmaxu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vmaxu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmaxu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vmaxu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vmaxu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmaxu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmaxu_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vmaxu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmaxu_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmaxu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vmaxu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmaxu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
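The vmfeq.c changes below add _Float16 coverage and reflow the existing tests; note that the compare intrinsics keep their old operand list, since mask-producing ops gain no tail-policy operand in this patch (their calls still end at the vl operand). As a hedged sketch of how the two pieces compose, assuming a header that exposes both the f16 compares and the _mt builtins together:

#include <riscv_vector.h>

// Hypothetical combination, not from this patch's tests: build a mask with
// the new f16 compare, then feed it to a tail-policy-aware masked op.
// `merge` provides the inactive (masked-off) elements; the trailing policy
// argument governs the elements past `vl`.
vint16m1_t pick_max_where_equal(vfloat16m1_t a, vfloat16m1_t b,
                                vint16m1_t merge, vint16m1_t x, vint16m1_t y,
                                size_t vl) {
  vbool16_t eq = vmfeq_vv_f16m1_b16(a, b, vl);
  return vmax_vv_i16m1_mt(eq, merge, x, y, vl, VE_TAIL_AGNOSTIC);
}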
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
index 413ec6c..542c239 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfeq.c
@@ -6,6 +6,114 @@
 #include <riscv_vector.h>
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfeq_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfeq_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfeq_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfeq_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfeq_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfeq_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfeq_vf_f16m8_b2(op1, op2, vl);
+}
+
//
CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -174,407 +282,273 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { return vmfeq_vf_f64m8_b8(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfeq_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfeq_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return 
vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) { - return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfeq_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, 
size_t vl) { + return vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { - return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfeq_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } 
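// A minimal usage sketch of the masked-compare pattern exercised by these
// tests (the helper name `cmp_keep_inactive` is hypothetical, not part of the
// patch hunks): within the first vl elements, lanes where `mask` is set
// receive the element-wise compare result, and the remaining lanes keep the
// corresponding bits of `maskedoff`. Assumes <riscv_vector.h> is included,
// as in the test files above.
vbool64_t cmp_keep_inactive(vbool64_t mask, vbool64_t maskedoff,
                            vfloat32mf2_t a, vfloat32mf2_t b, size_t vl) {
  // Active lanes: a[i] == b[i]; inactive lanes: taken from maskedoff.
  return vmfeq_vv_f32mf2_b64_m(mask, maskedoff, a, b, vl);
}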
-// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfeq_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t 
vl) { - return vmfeq_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfeq_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfeq_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2(op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfeq_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfeq_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t 
maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfeq_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmfeq_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: 
@test_vmfeq_vv_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfeq_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfeq_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfeq_vv_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfeq_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfeq_vf_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c index 8a7a362..cca0e52 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfge.c @@ -6,6 +6,114 @@ #include +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfge_vv_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfge_vv_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f16.f16.i64( [[OP1:%.*]], half 
[[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge_vv_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge_vv_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge_vv_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge_vv_f16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m8_b2(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -168,6 +276,114 @@ vbool8_t test_vmfge_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { return vmfge_vf_f64m8_b8(op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfge_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfge_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, 
size_t vl) { + return vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) @@ -330,218 +546,3 @@ vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8 return vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfge_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfge_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfge_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmfge.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfge_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfge_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfge_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool8_t test_vmfge_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool8_t test_vmfge_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfge_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfge_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfge_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfge_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf4_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfge_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, 
vfloat16mf4_t op2, size_t vl) { - return vmfge_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf4_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfge_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16mf2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfge_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfge_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16mf2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfge_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m1_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfge_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m1_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfge_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m2_b8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool8_t test_vmfge_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m2_b8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool8_t test_vmfge_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfge_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfge_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vv_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfge_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfge_vf_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c index 0e9dbd4..a5073a5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfgt.c @@ -2,10 +2,119 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ // RUN: -target-feature +experimental-v -target-feature +experimental-zfh \ -// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \ +// RUN: | FileCheck --check-prefix=CHECK-RV64 %s #include +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfgt_vv_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfgt_vv_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt_vv_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt_vv_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfgt_vv_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfgt_vv_f16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m8_b2(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -168,6 +277,114 @@ vbool8_t test_vmfgt_vf_f64m8_b8 (vfloat64m8_t op1, double op2, size_t vl) { return vmfgt_vf_f64m8_b8(op1, op2, vl); } +// CHECK-RV64-LABEL: 
@test_vmfgt_vv_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfgt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfgt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t 
+vbool8_t test_vmfgt_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
@@ -330,218 +547,3 @@ vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
   return vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfgt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmfgt_vv_f16mf4_b64(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfgt_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16mf4_b64(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfgt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmfgt_vv_f16mf2_b32(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfgt_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16mf2_b32(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfgt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmfgt_vv_f16m1_b16(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfgt_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m1_b16(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
-//
-vbool8_t test_vmfgt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmfgt_vv_f16m2_b8(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
-//
-vbool8_t test_vmfgt_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m2_b8(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfgt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmfgt_vv_f16m4_b4(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfgt_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m4_b4(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfgt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmfgt_vv_f16m8_b2(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfgt_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m8_b2(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf4_b64_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfgt_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmfgt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf4_b64_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfgt_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16mf2_b32_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfgt_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmfgt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16mf2_b32_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfgt_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m1_b16_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfgt_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m1_b16_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfgt_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m2_b8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
-//
-vbool8_t test_vmfgt_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m2_b8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
-//
-vbool8_t test_vmfgt_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m4_b4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfgt_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m4_b4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfgt_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vv_f16m8_b2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfgt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
  return vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfgt_vf_f16m8_b2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfgt.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
index 77fac48..750fcf7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfle.c
@@ -6,13 +6,120 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfle_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmfle_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfle_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmfle_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfle_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmfle_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfle_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmfle_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfle_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmfle_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfle_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfle_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m8_b2(op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
-                                   size_t vl) {
+vbool64_t test_vmfle_vv_f32mf2_b64 (vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
   return vmfle_vv_f32mf2_b64(op1, op2, vl);
 }
 
@@ -21,7 +128,7 @@ vbool64_t test_vmfle_vv_f32mf2_b64(vfloat32mf2_t op1, vfloat32mf2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32.i64(<vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vf_f32mf2_b64(vfloat32mf2_t op1, float op2, size_t vl) {
+vbool64_t test_vmfle_vf_f32mf2_b64 (vfloat32mf2_t op1, float op2, size_t vl) {
   return vmfle_vf_f32mf2_b64(op1, op2, vl);
 }
 
@@ -174,407 +281,273 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmfle_vf_f64m8_b8(op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
-                                     size_t vl) {
-  return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmfle_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                     vfloat32mf2_t op1, float op2, size_t vl) {
-  return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat32m1_t op1, vfloat32m1_t op2,
-                                    size_t vl) {
-  return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmfle_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat32m1_t op1, float op2, size_t vl) {
-  return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat32m2_t op1, vfloat32m2_t op2,
-                                    size_t vl) {
-  return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat32m2_t op1, float op2, size_t vl) {
-  return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat32m4_t op1, vfloat32m4_t op2,
-                                  size_t vl) {
-  return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat32m4_t op1, float op2, size_t vl) {
-  return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
-vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
-                                  vfloat32m8_t op1, vfloat32m8_t op2,
-                                  size_t vl) {
-  return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
-vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff,
-                                  vfloat32m8_t op1, float op2, size_t vl) {
-  return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                    vfloat64m1_t op1, vfloat64m1_t op2,
-                                    size_t vl) {
-  return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
-//
-vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                    vfloat64m1_t op1, double op2, size_t vl) {
-  return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat64m2_t op1, vfloat64m2_t op2,
-                                    size_t vl) {
-  return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
-//
-vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat64m2_t op1, double op2, size_t vl) {
-  return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL:
-// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat64m4_t op1, vfloat64m4_t op2,
-                                    size_t vl) {
-  return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
-//
-vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat64m4_t op1, double op2, size_t vl) {
-  return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
+vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat64m8_t op1, vfloat64m8_t op2,
-                                  size_t vl) {
-  return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat64m8_t op1, double op2, size_t vl) {
-  return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
+vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmfle_vv_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmfle_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
+  return vmfle_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16mf4_b64(op1, op2, vl);
+vbool64_t test_vmfle_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmfle_vv_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m1_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16mf2_b32(op1, op2, vl);
+vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmfle_vv_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m2_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m1_b16(op1, op2, vl);
+vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmfle_vv_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
  return vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m4_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m2_b8(op1, op2, vl);
+vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
-vbool4_t test_vmfle_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmfle_vv_f16m4_b4(op1, op2, vl);
+vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f32m8_b4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f32.f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
 //
-vbool4_t test_vmfle_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m4_b4(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfle_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmfle_vv_f16m8_b2(op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfle_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m8_b2(op1, op2, vl);
+vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf4_b64_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmfle_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf4_b64_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m1_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmfle_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16mf2_b32_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmfle_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16mf2_b32_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmfle_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m1_b16_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m1_b16_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m4_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmfle_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m2_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vv_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m2_b8_m(
+// CHECK-RV64-LABEL: @test_vmfle_vf_f64m8_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f64.f64.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmfle_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m4_b4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfle_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m4_b4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
-//
-vbool4_t test_vmfle_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmfle_vv_f16m8_b2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfle_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vmfle_vf_f16m8_b2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfle.mask.nxv32f16.f16.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
-//
-vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
index bf9158d..e1ea233 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmflt.c
@@ -6,6 +6,114 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmflt_vv_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16.i64(<vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t test_vmflt_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf4_b64(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmflt_vv_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16.i64(<vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
+//
+vbool32_t test_vmflt_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf2_b32(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmflt_vv_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16.i64(<vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
+//
+vbool16_t test_vmflt_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m1_b16(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmflt_vv_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16.i64(<vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m2_b8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return vmflt_vv_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16.i64(<vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m4_b4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return vmflt_vv_f16m8_b2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.nxv32f16.f16.i64(<vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m8_b2(op1, op2, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -174,407 +282,273 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) {
   return vmflt_vf_f64m8_b8(op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m(
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                     vfloat32mf2_t op1, vfloat32mf2_t op2,
-                                     size_t vl) {
-  return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmflt_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
+  return vmflt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m(
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i1> [[TMP0]]
 //
-vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff,
-                                     vfloat32mf2_t op1, float op2, size_t vl) {
-  return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl);
+vbool64_t test_vmflt_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m(
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat32m1_t op1, vfloat32m1_t op2,
-                                    size_t vl) {
-  return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmflt_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
+  return vmflt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m(
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i1> [[TMP0]]
 //
-vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff,
-                                    vfloat32m1_t op1, float op2, size_t vl) {
-  return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
+vbool32_t test_vmflt_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m(
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat32m2_t op1, vfloat32m2_t op2,
-                                    size_t vl) {
-  return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+  return vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m(
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i1> [[TMP0]]
 //
-vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff,
-                                    vfloat32m2_t op1, float op2, size_t vl) {
-  return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
+vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m(
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat32m4_t op1, vfloat32m4_t op2,
-                                  size_t vl) {
-  return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
+  return vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m(
+// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i1> [[TMP0]]
 //
-vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff,
-                                  vfloat32m4_t op1, float op2, size_t vl) {
-  return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
+vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m(
+// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmflt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); -} - -// 
CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { - return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmflt_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmflt_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmflt_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vmflt_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32( +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32( +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8(op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmflt_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmflt_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmflt_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmflt_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2(op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vmflt_vf_f32m8_b4_m(mask, maskedoff, 
op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmflt_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmflt_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m4_b16_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmflt_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmflt_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmflt_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmflt_vv_f16m4_b4_m(mask, 
maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmflt_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmflt_vv_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmflt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmflt_vf_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c index a59d5c2..0c4a28d 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmfne.c @@ -6,6 +6,114 @@ #include +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_f16mf4_b64(vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne_vv_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_f16mf4_b64(vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_f16mf2_b32(vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne_vv_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_f16mf2_b32(vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_f16m1_b16(vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne_vv_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_f16m1_b16(vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_f16m2_b8(vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne_vv_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_f16m2_b8(vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_f16m4_b4(vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return vmfne_vv_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_f16m4_b4(vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_f16m8_b2(vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne_vv_f16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_f16m8_b2(vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m8_b2(op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) @@ -174,407 +282,273 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { return vmfne_vf_f64m8_b8(op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, vfloat32mf2_t op2, - size_t vl) { - return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { + return vmfne_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat32mf2_t op1, float op2, size_t vl) { - return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f16mf4_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f32m1_b32_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, vfloat32m1_t op2, - size_t vl) { - return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { + return vmfne_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat32m1_t op1, float op2, size_t vl) { - return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f16mf2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, vfloat32m2_t op2, - size_t vl) 
{ - return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat32m2_t op1, float op2, size_t vl) { - return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, vfloat32m4_t op2, - size_t vl) { - return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat32m4_t op1, float op2, size_t vl) { - return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, vfloat32m8_t op2, - size_t vl) { - return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); +vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t 
op2, size_t vl) { + return vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, - vfloat32m8_t op1, float op2, size_t vl) { - return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, vfloat64m1_t op2, - size_t vl) { - return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, - vfloat64m1_t op1, double op2, size_t vl) { - return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, vfloat64m2_t op2, - size_t vl) { - return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, - vfloat64m2_t op1, double op2, size_t vl) { - return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, vfloat64m4_t op2, - size_t vl) { - return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, - vfloat64m4_t op1, double op2, size_t vl) { - return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); 
+vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, vfloat64m8_t op2, - size_t vl) { - return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, - vfloat64m8_t op1, double op2, size_t vl) { - return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); +vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f16mf4_b64 (vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfne_vv_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) { + return vmfne_vv_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64( +// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f16mf4_b64 (vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64(op1, op2, vl); +vbool64_t test_vmfne_vf_f32mf2_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) { + return vmfne_vf_f32mf2_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32( +// CHECK-RV64-LABEL: 
@test_vmfne_vv_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f16mf2_b32 (vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32( +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m1_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f16mf2_b32 (vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32(op1, op2, vl); +vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f16m1_b16 (vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16( +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m2_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f16m1_b16 (vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16(op1, op2, vl); +vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f16m2_b8 (vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8(op1, op2, vl); +vbool8_t 
test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8( +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m4_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f16m2_b8 (vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8(op1, op2, vl); +vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmfne_vv_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vv_f16m4_b4 (vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4(op1, op2, vl); +vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4( +// CHECK-RV64-LABEL: @test_vmfne_vf_f32m8_b4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfne_vf_f16m4_b4 (vfloat16m4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m4_b4(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfne_vv_f16m8_b2 (vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2(op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32f16.f16.i64( [[OP1:%.*]], half [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfne_vf_f16m8_b2 (vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2(op1, op2, vl); +vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vv_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) { - return vmfne_vv_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf4_b64_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m1_b64_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfne_vf_f16mf4_b64_m (vbool64_t mask, vbool64_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf4_b64_m(mask, maskedoff, op1, op2, vl); +vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vv_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) { - return vmfne_vv_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16mf2_b32_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m2_b32_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfne_vf_f16mf2_b32_m (vbool32_t mask, vbool32_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16mf2_b32_m(mask, maskedoff, op1, op2, vl); +vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f16m1_b16_m (vbool16_t mask, 
vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m1_b16_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m4_b16_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f16m1_b16_m (vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); +vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vv_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m2_b8_m( +// CHECK-RV64-LABEL: @test_vmfne_vf_f64m8_b8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f16m2_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfne_vv_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m4_b4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool4_t test_vmfne_vf_f16m4_b4_m (vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return 
vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); -} - -// CHECK-RV64-LABEL: @test_vmfne_vv_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfne_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); +vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vmfne_vf_f16m8_b2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], half [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c index fe50e2b..46516eb 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmin.c @@ -792,798 +792,1591 @@ vuint64m8_t test_vminu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vmin_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmin_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmin_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vmin_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmin_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf4_t test_vmin_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vmin_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vmin_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf4_t test_vmin_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+vint8mf4_t test_vmin_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf2_t test_vmin_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vmin_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vmin_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8mf2_t test_vmin_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+vint8mf2_t test_vmin_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m1_t test_vmin_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+vint8m1_t test_vmin_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vmin_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m1_t test_vmin_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+vint8m1_t test_vmin_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m2_t test_vmin_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+vint8m2_t test_vmin_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vmin_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m2_t test_vmin_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+vint8m2_t test_vmin_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m4_t test_vmin_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+vint8m4_t test_vmin_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vmin_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m4_t test_vmin_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+vint8m4_t test_vmin_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m8_t test_vmin_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+vint8m8_t test_vmin_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vmin_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint8m8_t test_vmin_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+vint8m8_t test_vmin_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
   return vmin_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf4_t test_vmin_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vmin_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vmin_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf4_t test_vmin_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+vint16mf4_t test_vmin_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf2_t test_vmin_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+vint16mf2_t test_vmin_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vmin_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16mf2_t test_vmin_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+vint16mf2_t test_vmin_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m1_t test_vmin_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+vint16m1_t test_vmin_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vmin_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m1_t test_vmin_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+vint16m1_t test_vmin_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m2_t test_vmin_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+vint16m2_t test_vmin_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vmin_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m2_t test_vmin_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+vint16m2_t test_vmin_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m4_t test_vmin_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+vint16m4_t test_vmin_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vmin_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m4_t test_vmin_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+vint16m4_t test_vmin_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m8_t test_vmin_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+vint16m8_t test_vmin_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vmin_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m8_t test_vmin_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+vint16m8_t test_vmin_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
   return vmin_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vmin_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+vint32mf2_t test_vmin_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vmin_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vmin_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+vint32mf2_t test_vmin_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vmin_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+vint32m1_t test_vmin_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vmin_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vmin_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+vint32m1_t test_vmin_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vmin_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+vint32m2_t test_vmin_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vmin_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vmin_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+vint32m2_t test_vmin_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vmin_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+vint32m4_t test_vmin_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vmin_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vmin_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+vint32m4_t test_vmin_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m8_t test_vmin_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+vint32m8_t test_vmin_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vmin_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m8_t test_vmin_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+vint32m8_t test_vmin_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
   return vmin_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m1_t test_vmin_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+vint64m1_t test_vmin_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vmin_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m1_t test_vmin_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vmin_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m2_t test_vmin_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vmin_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vmin_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m2_t test_vmin_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vmin_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m4_t test_vmin_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vmin_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vmin_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m4_t test_vmin_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vmin_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m8_t test_vmin_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vmin_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vmin_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmin_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m8_t test_vmin_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vmin_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
   return vmin_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf8_t test_vminu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vminu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vminu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf8_t test_vminu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vminu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf4_t test_vminu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vminu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vminu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf4_t test_vminu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vminu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf2_t test_vminu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vminu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vminu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf2_t test_vminu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vminu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m1_t test_vminu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vminu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vminu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m1_t test_vminu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vminu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m2_t test_vminu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vminu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vminu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m2_t test_vminu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vminu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vminu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vminu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vminu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vminu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vminu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vminu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vminu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vminu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vminu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vminu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vminu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vminu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vminu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vminu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vminu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vminu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vminu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vminu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vminu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vminu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vminu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vminu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vminu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vminu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vminu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vminu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vminu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vminu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vminu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vminu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vminu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vminu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vminu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vminu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vminu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vminu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vminu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vminu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vminu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vminu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vminu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vminu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vminu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vminu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vminu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vminu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vminu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vminu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vminu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vminu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vminu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vminu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vminu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vminu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vminu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vminu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vminu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vminu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vminu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vminu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vminu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vminu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vminu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vminu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vminu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vminu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vminu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vminu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vminu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m1_t test_vminu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vminu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
vminu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vminu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vminu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vuint64m2_t test_vminu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vminu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vminu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vminu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vuint64m4_t test_vminu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vminu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vminu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t 
test_vminu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vuint64m8_t test_vminu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vminu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vminu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vminu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vminu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vmin_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmin_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmin_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) { + return vmin_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmin_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmin_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, 
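The _mt tests added below all follow one shape: the _mt builtin takes the masked _m argument list plus a trailing tail-policy operand, and the expected IR gains a matching trailing immediate (i64 1 is the tail-agnostic encoding selected by the VE_TAIL_AGNOSTIC constant these tests pass). A minimal caller-side sketch of that pattern, assuming nothing beyond the constant used in the tests themselves:

  #include <riscv_vector.h>

  // Sketch of the new masked-with-tail-policy form: identical to the _m
  // builtin except for the tail policy appended after vl.
  vint8mf8_t min_tail_agnostic(vbool64_t mask, vint8mf8_t maskedoff,
                               vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
    return vmin_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
  }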
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmin_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vmin_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmin_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vmin_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmin_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vmin_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmin_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vmin_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmin_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vmin_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmin_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vmin_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmin_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vmin_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmin_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vmin_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmin_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vmin_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmin_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmin_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmin_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmin_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmin_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmin_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmin_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmin_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmin_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmin_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmin_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmin_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmin_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmin_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmin_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmin_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmin_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmin_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmin_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmin_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmin_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmin_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmin_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmin_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmin_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vmin_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmin_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmin.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmin_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vmin_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vminu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vminu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vminu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vminu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vminu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vminu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vminu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vminu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vminu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vminu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vminu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vminu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vminu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vminu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vminu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vminu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vminu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vminu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vminu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vminu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vminu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vminu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vminu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vminu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vminu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vminu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vminu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vminu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vminu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vminu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vminu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vminu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vminu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vminu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vminu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vminu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vminu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vminu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vminu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vminu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vminu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vminu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vminu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vminu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vminu_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vminu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vminu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vminu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
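The generated tests above only exercise the tail-agnostic policy. For the undisturbed case, the same builtins would keep elements past vl from maskedoff rather than leaving them unspecified. A hedged caller-side sketch, assuming a VE_TAIL_UNDISTURBED counterpart to VE_TAIL_AGNOSTIC (that name is an assumption; only the agnostic constant appears in this patch):

  #include <riscv_vector.h>

  // Assumption: VE_TAIL_UNDISTURBED is the tail-undisturbed counterpart of
  // VE_TAIL_AGNOSTIC; only the latter is used by the tests in this patch.
  // With tail undisturbed, elements past vl keep the values supplied by
  // maskedoff instead of being left unspecified.
  vuint32m1_t minu_keep_tail(vbool32_t mask, vuint32m1_t maskedoff,
                             vuint32m1_t op1, uint32_t op2, size_t vl) {
    return vminu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
  }
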
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
index f7d7e8b0..294ac86 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmul.c
@@ -1980,1986 +1980,3967 @@ vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
+vint64m8_t test_vmulhsu_vx_i64m8 (vint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vmul_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmul_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+vint8mf8_t test_vmul_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vmul_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+vint8mf4_t test_vmul_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vmul_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+vint8mf2_t test_vmul_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+vint8m1_t test_vmul_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+vint8m1_t test_vmul_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+vint8m2_t test_vmul_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+vint8m2_t test_vmul_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+vint8m4_t test_vmul_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+vint8m4_t test_vmul_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+vint8m8_t test_vmul_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+vint8m8_t test_vmul_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return vmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vmul_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+vint16mf4_t test_vmul_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+vint16mf2_t test_vmul_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+vint16mf2_t test_vmul_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+vint16m1_t test_vmul_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+vint16m1_t test_vmul_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+vint16m2_t test_vmul_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+vint16m2_t test_vmul_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return vmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+vint16m4_t test_vmul_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+vint16m4_t test_vmul_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
  return vmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+vint16m8_t test_vmul_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t
op1, int16_t op2, size_t vl) { +vint16m8_t test_vmul_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vmul_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vmul_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vmul_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vmul_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t 
test_vmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vmul_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vmul_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vmul_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vmul_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vmul_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vmul_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vmul_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vmul_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vmul_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vmul_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vmul_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vmul_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vmul_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vmul_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vmul_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vmul_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmul_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vmul_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vmul_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vmul_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmul_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vmul_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vmul_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vmul_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmul_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vmul_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vmul_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vmul_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmul_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vmul_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vmul_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vmul_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmul_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vmul_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vmul_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vmul_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmul_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vmul_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vmul_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vmul_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmul_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vmul_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vmul_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vuint16mf4_t test_vmul_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vmul_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmul_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint16mf4_t test_vmul_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmul_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vuint16mf2_t test_vmul_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vmul_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmul_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint16mf2_t test_vmul_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vuint16m1_t test_vmul_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vmul_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmul_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vmul_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vuint16m2_t test_vmul_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmul_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmul_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vmul_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vuint16m4_t test_vmul_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmul_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmul_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vmul_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vuint16m8_t test_vmul_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmul_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmul_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vmul_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vmul_vx_u16m8_m(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vuint32mf2_t test_vmul_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmul_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmul_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint32mf2_t test_vmul_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vmul_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vuint32m1_t test_vmul_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmul_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmul_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vmul_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vmul_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmul_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vuint32m2_t test_vmul_vv_u32m2_m (vbool16_t mask, vuint32m2_t 
maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmul_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmul_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vmul_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vmul_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vuint32m4_t test_vmul_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmul_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmul_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vmul_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vmul_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vuint32m8_t test_vmul_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmul_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmul_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t 
op2, size_t vl) { +vuint32m8_t test_vmul_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vmul_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vuint64m1_t test_vmul_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmul_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vmul_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vmul_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vuint64m2_t test_vmul_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmul_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vmul_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vmul_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t 
test_vmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vuint64m4_t test_vmul_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmul_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vmul_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vmul_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { +vuint64m8_t test_vmul_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmul_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmul_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vmul_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vmul_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vmulh_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vmulh_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmulh_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vmulh_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vmulh_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vmulh_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vmulh_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmulh_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vmulh_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vmulh_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vmulh_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vmulh_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmulh_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vmulh_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vmulh_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmulh_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+vint8m1_t test_vmulh_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
return vmulh_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmulh_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+vint8m1_t test_vmulh_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
return vmulh_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmulh_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+vint8m2_t test_vmulh_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
return vmulh_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmulh_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+vint8m2_t test_vmulh_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
return vmulh_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmulh_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+vint8m4_t test_vmulh_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
return vmulh_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmulh_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+vint8m4_t test_vmulh_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
return vmulh_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmulh_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+vint8m8_t test_vmulh_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
return vmulh_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmulh_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+vint8m8_t test_vmulh_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
return vmulh_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmulh_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vmulh_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
return vmulh_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmulh_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+vint16mf4_t test_vmulh_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmulh_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+vint16mf2_t test_vmulh_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
return vmulh_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmulh_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+vint16mf2_t test_vmulh_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmulh_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+vint16m1_t test_vmulh_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
return vmulh_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmulh_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+vint16m1_t test_vmulh_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmulh_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+vint16m2_t test_vmulh_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
return vmulh_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vmulh_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+vint16m2_t test_vmulh_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmulh_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+vint16m4_t test_vmulh_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
return vmulh_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vmulh_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+vint16m4_t test_vmulh_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmulh_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+vint16m8_t test_vmulh_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
return vmulh_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vmulh_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+vint16m8_t test_vmulh_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
return vmulh_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vmulh_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+vint32mf2_t test_vmulh_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmulh_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vmulh_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+vint32mf2_t test_vmulh_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmulh_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vmulh_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+vint32m1_t test_vmulh_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
return vmulh_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vmulh_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+vint32m1_t test_vmulh_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
return vmulh_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vmulh_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+vint32m2_t test_vmulh_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
return vmulh_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vmulh_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+vint32m2_t test_vmulh_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
return vmulh_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vmulh_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+vint32m4_t test_vmulh_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
return vmulh_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vmulh_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+vint32m4_t test_vmulh_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
return vmulh_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vmulh_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+vint32m8_t test_vmulh_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
return vmulh_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+vint32m8_t test_vmulh_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+vint64m1_t test_vmulh_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vmulh_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vmulh_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vmulh_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vmulh_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vmulh_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vmulh_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vmulh_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vmulhu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vmulhu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vmulhu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vmulhu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vmulhu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vmulhu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vmulhu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
return vmulhu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vmulhu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vmulhu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vmulhu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vmulhu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
return vmulhu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vmulhu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vmulhu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vmulhu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vmulhu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
return vmulhu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vmulhu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vmulhu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vmulhu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vmulhu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vmulhu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vmulhu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vmulhu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vmulhu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vmulhu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vmulhu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vmulhu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vmulhu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vmulhu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vmulhu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
return vmulhu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vmulhu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vmulhu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
return vmulhu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vmulhu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vmulhu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
return vmulhu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vmulhu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vmulhu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vmulhu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vmulhu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
return vmulhu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vmulhu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vmulhu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vmulhu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vmulhu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
return vmulhu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vmulhu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vmulhu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vmulhu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vmulhu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
return vmulhu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vmulhu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vmulhu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vmulhu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vmulhu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
return vmulhu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vmulhu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vmulhu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vmulhu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vmulhu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
return vmulhu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vmulhu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vmulhu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
return vmulhu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vmulhu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vmulhu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmulhu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vmulhu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vmulhu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmulhu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vmulhu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vmulhu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
return vmulhu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vmulhu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vmulhu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
return vmulhu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vmulhu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vmulhu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
return vmulhu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vmulhu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vmulhu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
return vmulhu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vmulhu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vmulhu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
return vmulhu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vmulhu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vmulhu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
return vmulhu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vmulhu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vmulhu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
return vmulhu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vmulhu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vmulhu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vmulhu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vmulhu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vmulhu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vmulhu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vmulhu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vmulhu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vmulhu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmulhsu_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vmulhsu_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
return vmulhsu_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vmulhsu_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+vint8mf8_t test_vmulhsu_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmulhsu_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vmulhsu_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
return vmulhsu_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vmulhsu_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+vint8mf4_t test_vmulhsu_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmulhsu_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vmulhsu_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
return vmulhsu_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vmulhsu_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+vint8mf2_t test_vmulhsu_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmulhsu_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vint8m1_t test_vmulhsu_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
return vmulhsu_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vmulhsu_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+vint8m1_t test_vmulhsu_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmulhsu_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vint8m2_t test_vmulhsu_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vmulhsu_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vmulhsu_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+vint8m2_t test_vmulhsu_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmulhsu_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vint8m4_t test_vmulhsu_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vmulhsu_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vmulhsu_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+vint8m4_t test_vmulhsu_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmulhsu_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vint8m8_t test_vmulhsu_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl) {
return vmulhsu_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vmulhsu_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
+vint8m8_t test_vmulhsu_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl) {
return vmulhsu_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmulhsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vmulhsu_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
return vmulhsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vmulhsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+vint16mf4_t test_vmulhsu_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
return vmulhsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmulhsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vint16mf2_t test_vmulhsu_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
return vmulhsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vmulhsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+vint16mf2_t test_vmulhsu_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
return vmulhsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmulhsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vint16m1_t test_vmulhsu_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
return vmulhsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vmulhsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+vint16m1_t test_vmulhsu_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
return vmulhsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { +vint16m2_t test_vmulhsu_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { return vmulhsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmulhsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { +vint16m2_t test_vmulhsu_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) { return vmulhsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { +vint16m4_t test_vmulhsu_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { return vmulhsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmulhsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { +vint16m4_t test_vmulhsu_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) { return vmulhsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { +vint16m8_t test_vmulhsu_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { return vmulhsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmulhsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { +vint16m8_t test_vmulhsu_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl) { return vmulhsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vint32mf2_t test_vmulhsu_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vmulhsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmulhsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { +vint32mf2_t test_vmulhsu_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) { return vmulhsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { +vint32m1_t test_vmulhsu_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) { return vmulhsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmulhsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { +vint32m1_t test_vmulhsu_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) { return 
vmulhsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { +vint32m2_t test_vmulhsu_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) { return vmulhsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmulhsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { +vint32m2_t test_vmulhsu_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) { return vmulhsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { +vint32m4_t test_vmulhsu_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) { return vmulhsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmulhsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { +vint32m4_t test_vmulhsu_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) { return vmulhsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t 
op2, size_t vl) { +vint32m8_t test_vmulhsu_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl) { return vmulhsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { +vint32m8_t test_vmulhsu_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) { return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { +vint64m1_t test_vmulhsu_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) { return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { +vint64m1_t test_vmulhsu_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) { return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { +vint64m2_t test_vmulhsu_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) { return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { +vint64m2_t test_vmulhsu_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) { return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { +vint64m4_t test_vmulhsu_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) { return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { +vint64m4_t test_vmulhsu_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) { return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { +vint64m8_t test_vmulhsu_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) { return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { +vint64m8_t test_vmulhsu_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) { return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint8mf8_t test_vmul_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmul_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmul_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmul_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmul_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8m1_mt(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmul_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmul_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmul_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmul_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmul_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmul_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmul_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmul_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmul_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmul_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmul_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, 
int32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmul_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmul_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmul_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmul_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmul_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmul_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmul_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmul_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmul_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmul_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmul_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmul_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t 
test_vmul_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmul_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmul_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmul_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16mf4_mt(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmul_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmul_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmul_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmul_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmul_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmul_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmul_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmul_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmul_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vmul_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmul.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmul_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmul.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmul_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmul.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmul_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmul.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmul_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmul_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmul_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmul_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vv_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vmul_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmul_vx_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmul_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vmul_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulh.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vmulh_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmulh.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vmulh_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmulh.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vmulh_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmulh.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vmulh_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmulh.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vmulh_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmulh.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vmulh_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmulh.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vmulh_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmulh.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vmulh_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmulh.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vmulh_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmulh.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmulh_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmulh.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmulh_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmulh.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmulh_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmulh.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmulh_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vx_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmulh.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmulh_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vmulh_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulh_vv_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmulh.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t
test_vmulh_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmulh_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmulh_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmulh_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmulh_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) 
{ + return vmulh_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmulh_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmulh_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmulh_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vmulh_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vmulh_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulh.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmulh_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vmulh_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmulhu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmulhu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmulhu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmulhu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmulhu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmulhu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmulhu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmulhu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmulhu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmulhu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmulhu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmulhu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmulhu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmulhu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmulhu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vmulhu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmulhu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmulhu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmulhu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t 
maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmulhu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmulhu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmulhu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmulhu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, 
uint8_t ta) { + return vmulhu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) { + return vmulhu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmulhu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vmulhu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmulhsu_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmulhsu_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8mf2_mt(mask, maskedoff, op1, 
op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmulhsu_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmulhsu_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmulhsu_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmulhsu_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmulhsu_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmulhsu_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmulhsu_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmulhsu_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vmulhsu_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vmulhsu_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vmulhsu_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vmulhsu_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vmulhsu_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vmulhsu_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vmulhsu_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vmulhsu_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vmulhsu_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vmulhsu_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vmulhsu_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vmulhsu_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vmulhsu_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vmulhsu_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vmulhsu_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
index f0997d9..bce92a9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
@@ -798,6 +798,60 @@ vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) {
 return vmv_v_x_u64m8(src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vmv_v_v_f16mf4(vfloat16mf4_t src, size_t vl) {
+ return vmv_v_v_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vmv_v_v_f16mf2(vfloat16mf2_t src, size_t vl) {
+ return vmv_v_v_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vmv_v_v_f16m1(vfloat16m1_t src, size_t vl) {
+ return vmv_v_v_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vmv_v_v_f16m2(vfloat16m2_t src, size_t vl) {
+ return vmv_v_v_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vmv_v_v_f16m4(vfloat16m4_t src, size_t vl) {
+ return vmv_v_v_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vmv_v_v_f16m8(vfloat16m8_t src, size_t vl) {
+ return vmv_v_v_f16m8(src, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[SRC:%.*]], i64 [[VL:%.*]])
@@ -888,7 +942,7 @@ int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x_s_i8mf8_i8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
@@ -936,7 +990,7 @@ int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x_s_i8m1_i8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
@@ -952,7 +1006,7 @@ int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x_s_i8m2_i8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
@@ -968,7 +1022,7 @@ int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x_s_i8m4_i8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
@@ -984,7 +1038,7 @@ int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x_s_i8m8_i8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
@@ -1002,7 +1056,7 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
@@ -1020,7 +1074,7 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
@@ -1038,7 +1092,7 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
@@ -1056,7 +1110,7 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
@@ -1074,7 +1128,7 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
@@ -1092,7 +1146,7 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
@@ -1110,7 +1164,7 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
@@ -1128,7 +1182,7 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
@@ -1146,7 +1200,7 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
@@ -1164,7 +1218,7 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
@@ -1182,7 +1236,7 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
@@ -1200,7 +1254,7 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
@@ -1218,7 +1272,7 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
@@ -1236,7 +1290,7 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
@@ -1254,7 +1308,7 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
@@ -1270,7 +1324,7 @@ uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x_s_u8mf8_u8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
@@ -1286,7 +1340,7 @@ uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x_s_u8mf4_u8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
@@ -1302,7 +1356,7 @@ uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x_s_u8mf2_u8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
@@ -1318,7 +1372,7 @@ uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x_s_u8m1_u8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
@@ -1334,7 +1388,7 @@ uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x_s_u8m2_u8(src); }
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
@@ -1636,63 +1690,10 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) {
- return vmv_s_x_u64m8(dst, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) {
- return vmv_v_v_f16mf4(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) {
- return vmv_v_v_f16mf2(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) {
- return vmv_v_v_f16m1(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) {
- return vmv_v_v_f16m2(src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) {
- return vmv_v_v_f16m4(src, vl);
+vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dest, uint64_t src, size_t vl) {
+ return vmv_s_x_u64m8(dest, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) {
- return vmv_v_v_f16m8(src, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
index 11aa325..1075c05 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnclip.c
@@ -555,636 +555,1087 @@ vuint32m4_t test_vnclipu_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift,
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m4_t test_vnclipu_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) {
+vuint32m4_t test_vnclipu_wx_u32m4 (vuint64m8_t op1, size_t shift, size_t vl) {
 return vnclipu_wx_u32m4(op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vnclip_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
- vint16mf4_t op1, vuint8mf8_t shift,
- size_t vl) {
+vint8mf8_t test_vnclip_wv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 return vnclip_wv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vnclip_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
- vint16mf4_t op1, size_t shift, size_t vl) {
+vint8mf8_t test_vnclip_wx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vnclip_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
- vint16mf2_t op1, vuint8mf4_t shift,
- size_t vl) {
+vint8mf4_t test_vnclip_wv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 return vnclip_wv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vnclip_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
- vint16mf2_t op1, size_t shift, size_t vl) {
+vint8mf4_t test_vnclip_wx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vnclip_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
- vint16m1_t op1, vuint8mf2_t shift,
- size_t vl) {
+vint8mf2_t test_vnclip_wv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 return vnclip_wv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vnclip_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
- vint16m1_t op1, size_t shift, size_t vl) {
+vint8mf2_t test_vnclip_wx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vnclip_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
- vint16m2_t op1, vuint8m1_t shift, size_t vl) {
+vint8m1_t test_vnclip_wv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) {
 return vnclip_wv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vnclip_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
- vint16m2_t op1, size_t shift, size_t vl) {
+vint8m1_t test_vnclip_wx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vnclip_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
- vint16m4_t op1, vuint8m2_t shift, size_t vl) {
+vint8m2_t test_vnclip_wv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) {
 return vnclip_wv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vnclip_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
- vint16m4_t op1, size_t shift, size_t vl) {
+vint8m2_t test_vnclip_wx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vnclip_wv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
- vint16m8_t op1, vuint8m4_t shift, size_t vl) {
+vint8m4_t test_vnclip_wv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) {
 return vnclip_wv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vnclip_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
- vint16m8_t op1, size_t shift, size_t vl) {
+vint8m4_t test_vnclip_wx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vnclip_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
- vint32mf2_t op1, vuint16mf4_t shift,
- size_t vl) {
+vint16mf4_t test_vnclip_wv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) {
 return vnclip_wv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vnclip_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
- vint32mf2_t op1, size_t shift, size_t vl) {
+vint16mf4_t test_vnclip_wx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf2_t test_vnclip_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
- vint32m1_t op1, vuint16mf2_t shift,
- size_t vl) {
+vint16mf2_t test_vnclip_wv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) {
 return vnclip_wv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf2_t test_vnclip_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
- vint32m1_t op1, size_t shift, size_t vl) {
+vint16mf2_t test_vnclip_wx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m1_t test_vnclip_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
- vint32m2_t op1, vuint16m1_t shift,
- size_t vl) {
+vint16m1_t test_vnclip_wv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) {
 return vnclip_wv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m1_t test_vnclip_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
- vint32m2_t op1, size_t shift, size_t vl) {
+vint16m1_t test_vnclip_wx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m2_t test_vnclip_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
- vint32m4_t op1, vuint16m2_t shift,
- size_t vl) {
+vint16m2_t test_vnclip_wv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) {
 return vnclip_wv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m2_t test_vnclip_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
- vint32m4_t op1, size_t shift, size_t vl) {
+vint16m2_t test_vnclip_wx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m4_t test_vnclip_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
- vint32m8_t op1, vuint16m4_t shift,
- size_t vl) {
+vint16m4_t test_vnclip_wv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) {
 return vnclip_wv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m4_t test_vnclip_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
- vint32m8_t op1, size_t shift, size_t vl) {
+vint16m4_t test_vnclip_wx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vnclip_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint64m1_t op1, vuint32mf2_t shift,
- size_t vl) {
+vint32mf2_t test_vnclip_wv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) {
 return vnclip_wv_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vnclip_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint64m1_t op1, size_t shift, size_t vl) {
+vint32mf2_t test_vnclip_wx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i32mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vnclip_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint64m2_t op1, vuint32m1_t shift,
- size_t vl) {
+vint32m1_t test_vnclip_wv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) {
 return vnclip_wv_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vnclip_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint64m2_t op1, size_t shift, size_t vl) {
+vint32m1_t test_vnclip_wx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i32m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vnclip_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint64m4_t op1, vuint32m2_t shift,
- size_t vl) {
+vint32m2_t test_vnclip_wv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) {
 return vnclip_wv_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vnclip_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint64m4_t op1, size_t shift, size_t vl) {
+vint32m2_t test_vnclip_wx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i32m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vnclip_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint64m8_t op1, vuint32m4_t shift,
- size_t vl) {
+vint32m4_t test_vnclip_wv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) {
 return vnclip_wv_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vnclip_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint64m8_t op1, size_t shift, size_t vl) {
+vint32m4_t test_vnclip_wx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
 return vnclip_wx_i32m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vnclipu_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
- vuint16mf4_t op1, vuint8mf8_t shift,
- size_t vl) {
+vuint8mf8_t test_vnclipu_wv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) {
 return vnclipu_wv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vnclipu_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
- vuint16mf4_t op1, size_t shift, size_t vl) {
+vuint8mf8_t test_vnclipu_wx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
 return vnclipu_wx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vnclipu_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
- vuint16mf2_t op1, vuint8mf4_t shift,
- size_t vl) {
+vuint8mf4_t test_vnclipu_wv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) {
 return vnclipu_wv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vnclipu_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
- vuint16mf2_t op1, size_t shift, size_t vl) {
+vuint8mf4_t test_vnclipu_wx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
 return vnclipu_wx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vnclipu_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
- vuint16m1_t op1, vuint8mf2_t shift,
- size_t vl) {
+vuint8mf2_t test_vnclipu_wv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) {
 return vnclipu_wv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vnclipu_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
- vuint16m1_t op1, size_t shift, size_t vl) {
+vuint8mf2_t test_vnclipu_wx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
 return vnclipu_wx_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vnclipu_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
- vuint16m2_t op1, vuint8m1_t shift,
- size_t vl) {
+vuint8m1_t test_vnclipu_wv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) {
 return vnclipu_wv_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vnclipu_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
- vuint16m2_t op1, size_t shift, size_t vl) {
+vuint8m1_t test_vnclipu_wx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
 return vnclipu_wx_u8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m2_t test_vnclipu_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
- vuint16m4_t op1, vuint8m2_t shift,
- size_t vl) {
+vuint8m2_t test_vnclipu_wv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) {
 return vnclipu_wv_u8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnclipu_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint16m4_t op1, size_t shift, size_t vl) { +vuint8m2_t test_vnclipu_wx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { return vnclipu_wx_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint16m8_t op1, vuint8m4_t shift, - size_t vl) { +vuint8m4_t test_vnclipu_wv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { return vnclipu_wv_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnclipu_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint16m8_t op1, size_t shift, size_t vl) { +vuint8m4_t test_vnclipu_wx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { return vnclipu_wx_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t shift, - size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { return vnclipu_wv_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnclipu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - 
vuint32mf2_t op1, size_t shift, - size_t vl) { +vuint16mf4_t test_vnclipu_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { return vnclipu_wx_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint32m1_t op1, vuint16mf2_t shift, - size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { return vnclipu_wv_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnclipu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint32m1_t op1, size_t shift, - size_t vl) { +vuint16mf2_t test_vnclipu_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { return vnclipu_wx_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint32m2_t op1, vuint16m1_t shift, - size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { return vnclipu_wv_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnclipu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint32m2_t op1, size_t shift, size_t vl) { +vuint16m1_t test_vnclipu_wx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { return vnclipu_wx_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint32m4_t op1, vuint16m2_t shift, - size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { return vnclipu_wv_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnclipu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint32m4_t op1, size_t shift, size_t vl) { +vuint16m2_t test_vnclipu_wx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { return vnclipu_wx_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint32m8_t op1, vuint16m4_t shift, - size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { return vnclipu_wv_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnclipu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint32m8_t op1, size_t shift, size_t vl) { +vuint16m4_t test_vnclipu_wx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { return vnclipu_wx_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint64m1_t op1, vuint32mf2_t shift, - size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t 
maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { return vnclipu_wv_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnclipu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint64m1_t op1, size_t shift, - size_t vl) { +vuint32mf2_t test_vnclipu_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { return vnclipu_wx_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint64m2_t op1, vuint32m1_t shift, - size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { return vnclipu_wv_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnclipu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint64m2_t op1, size_t shift, size_t vl) { +vuint32m1_t test_vnclipu_wx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { return vnclipu_wx_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint64m4_t op1, vuint32m2_t shift, - size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { return vnclipu_wv_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnclipu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint64m4_t op1, size_t shift, size_t vl) { +vuint32m2_t test_vnclipu_wx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { return vnclipu_wx_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint64m8_t op1, vuint32m4_t shift, - size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { return vnclipu_wv_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnclipu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint64m8_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vnclipu_wx_u32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnclip_wx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vnclip_wx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnclip_wx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnclip_wx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnclip_wx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnclip_wx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vnclip_wv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnclip_wx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnclip_wx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnclip_wx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); 
+} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnclip_wx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnclip_wx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnclip_wx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnclip_wx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i32mf2_mt(mask, maskedoff, op1, shift, 
vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnclip_wx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnclip_wx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vnclip_wv_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclip_wx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnclip_wx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclip_wx_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8mf8_mt(mask, 
maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnclipu_wx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnclipu_wx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnclipu_wx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnclipu_wx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, uint8_t 
ta) { + return vnclipu_wx_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnclipu_wx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnclipu_wx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnclipu_wx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wv_u16mf2_mt (vbool32_t mask, vuint16mf2_t 
maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnclipu_wx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnclipu_wx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnclipu_wx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnclipu_wv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m4_t test_vnclipu_wx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnclipu_wx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnclipu_wx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vnclipu_wv_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnclipu_wx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnclipu_wx_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnclipu_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) {
+  return vnclipu_wv_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vnclipu_wx_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vnclipu_wx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vnclipu_wx_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
index f316e16..ee5f061 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
@@ -537,10 +537,280 @@ vuint32m2_t test_vncvt_x_x_w_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl) {
   return vncvt_x_x_w_u32m4_m(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vncvt_x_x_w_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t src, size_t vl, uint8_t ta) {
+  return vncvt_x_x_w_i8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vncvt_x_x_w_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t src, size_t vl, uint8_t ta) {
+  return vncvt_x_x_w_i8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vncvt_x_x_w_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t src, size_t vl, uint8_t ta) {
+  return vncvt_x_x_w_i8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vncvt_x_x_w_i8m1_mt (vbool8_t mask, vint8m1_t 
maskedoff, vint16m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vncvt_x_x_w_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i8m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vncvt_x_x_w_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vncvt_x_x_w_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8mf8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vncvt_x_x_w_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vncvt_x_x_w_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vncvt_x_x_w_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vncvt_x_x_w_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u8m4_mt( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vncvt_x_x_w_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u8m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vncvt_x_x_w_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vncvt_x_x_w_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vncvt_x_x_w_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vncvt_x_x_w_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vncvt_x_x_w_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vncvt_x_x_w_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vncvt_x_x_w_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vncvt_x_x_w_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vncvt_x_x_w_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vncvt_x_x_w_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vncvt_x_x_w_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vncvt_x_x_w_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vncvt_x_x_w_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vncvt_x_x_w_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t src, size_t vl, uint8_t ta) { + return 
vncvt_x_x_w_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vncvt_x_x_w_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vncvt_x_x_w_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vncvt_x_x_w_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vncvt_x_x_w_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t src, size_t vl, uint8_t ta) { + return vncvt_x_x_w_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c index 651d25d..960e32e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c @@ -394,10 +394,208 @@ vint64m4_t test_vneg_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t // CHECK-RV64-LABEL: @test_vneg_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl) { return vneg_v_i64m8_m(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vneg_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vneg_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8mf8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vneg_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vneg_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vneg_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vneg_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vneg_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vneg_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl, uint8_t ta) { + return vneg_v_i8m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vneg_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vneg_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vneg_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vneg_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vneg_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vneg_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl, uint8_t ta) { + return vneg_v_i16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vneg_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vneg_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl, uint8_t ta) { + return vneg_v_i32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vneg_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vneg_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// 
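// ---------------------------------------------------------------------------
// Illustrative sketch, not generated test code: using the tail-policy operand
// of the new _mt builtins from C. Assumption: the policy encodings are
// 0 = tail undisturbed and 1 = tail agnostic, consistent with the trailing
// `i64 1` that VE_TAIL_AGNOSTIC produces in the CHECK lines above.
#include <riscv_vector.h>
#include <stddef.h>

// Negate the active elements of op1. Passing 0 (assumed tail-undisturbed
// encoding) asks for the values already in maskedoff to be kept for lanes
// past vl, instead of leaving them unspecified as VE_TAIL_AGNOSTIC (1) would.
static inline vint32m1_t negate_keep_tail(vbool32_t mask, vint32m1_t maskedoff,
                                          vint32m1_t op1, size_t vl) {
  return vneg_v_i32m1_mt(mask, maskedoff, op1, vl, 0);
}
// ---------------------------------------------------------------------------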
CHECK-RV64-LABEL: @test_vneg_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vneg_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl, uint8_t ta) { + return vneg_v_i32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vneg_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl, uint8_t ta) { + return vneg_v_i64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vneg_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl, uint8_t ta) { + return vneg_v_i64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vneg_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl, uint8_t ta) { + return vneg_v_i64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vneg_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vneg_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl, uint8_t ta) { + return vneg_v_i64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c index c45a2ff..b12d272 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c @@ -790,10 +790,406 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m // CHECK-RV64-LABEL: @test_vnot_v_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) { return vnot_v_u64m8_m(mask, maskedoff, op1, vl); } +// CHECK-RV64-LABEL: @test_vnot_v_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnot_v_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t vl, 
uint8_t ta) { + return vnot_v_i8mf8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnot_v_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnot_v_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnot_v_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnot_v_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnot_v_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vnot_v_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_i8m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnot_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnot_v_i16mf2_mt 
(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnot_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnot_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnot_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vnot_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_i16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnot_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnot_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_i32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnot_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnot_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vnot_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_i32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vnot_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_i64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vnot_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_i64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vnot_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_i64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vnot_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_i64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnot_v_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8mf8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnot_v_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnot_v_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnot_v_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnot_v_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnot_v_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vnot_v_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_u8m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnot_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnot_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnot_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// 
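// ---------------------------------------------------------------------------
// Illustrative sketch, not generated test code. As the updated _m CHECK lines
// earlier in this file show (a trailing `, i64 1` was added), the existing
// masked builtins now lower with tail agnostic; an _mt call with policy 1 is
// the explicit spelling of the same behaviour. vnot_v_u16m1_m is assumed to
// be declared alongside the _mt form exercised just above.
#include <riscv_vector.h>
#include <stddef.h>

static inline vuint16m1_t not_m(vbool16_t mask, vuint16m1_t maskedoff,
                                vuint16m1_t op1, size_t vl) {
  return vnot_v_u16m1_m(mask, maskedoff, op1, vl);      // policy implied
}

static inline vuint16m1_t not_mt(vbool16_t mask, vuint16m1_t maskedoff,
                                 vuint16m1_t op1, size_t vl) {
  return vnot_v_u16m1_mt(mask, maskedoff, op1, vl, 1);  // explicit agnostic
}
// ---------------------------------------------------------------------------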
CHECK-RV64-LABEL: @test_vnot_v_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnot_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnot_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vnot_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_u16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnot_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnot_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_u32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnot_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnot_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vnot_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, 
vuint32m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_u32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vnot_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t vl, uint8_t ta) { + return vnot_v_u64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vnot_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t vl, uint8_t ta) { + return vnot_v_u64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vnot_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t vl, uint8_t ta) { + return vnot_v_u64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnot_v_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 -1, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vnot_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl, uint8_t ta) { + return vnot_v_u64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c index 21ca9ab..e98e63b 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsra.c @@ -270,276 +270,547 @@ vint32m4_t test_vnsra_wv_i32m4(vint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wx_i32m4(vint64m8_t op1, size_t shift, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4 (vint64m8_t op1, size_t shift, size_t vl) { return vnsra_wx_i32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { +vint8mf8_t test_vnsra_wv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl) { return vnsra_wv_i8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vnsra_wx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { +vint8mf8_t test_vnsra_wx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { return vnsra_wx_i8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { +vint8mf4_t test_vnsra_wv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl) { return vnsra_wv_i8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vnsra_wx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { +vint8mf4_t test_vnsra_wx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { return vnsra_wx_i8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { +vint8mf2_t test_vnsra_wv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl) { return vnsra_wv_i8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vnsra_wx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { +vint8mf2_t test_vnsra_wx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t 
shift, size_t vl) { return vnsra_wx_i8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { +vint8m1_t test_vnsra_wv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl) { return vnsra_wv_i8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vnsra_wx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { +vint8m1_t test_vnsra_wx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { return vnsra_wx_i8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnsra_wv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { +vint8m2_t test_vnsra_wv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl) { return vnsra_wv_i8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vnsra_wx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { +vint8m2_t test_vnsra_wx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { return vnsra_wx_i8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wv_i8m4_m(vbool2_t mask, vint8m4_t 
maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { +vint8m4_t test_vnsra_wv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl) { return vnsra_wv_i8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vnsra_wx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { +vint8m4_t test_vnsra_wx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { return vnsra_wx_i8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { +vint16mf4_t test_vnsra_wv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl) { return vnsra_wv_i16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vnsra_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { +vint16mf4_t test_vnsra_wx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { return vnsra_wx_i16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { +vint16mf2_t test_vnsra_wv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl) { return vnsra_wv_i16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vnsra_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { +vint16mf2_t test_vnsra_wx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { return vnsra_wx_i16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { +vint16m1_t test_vnsra_wv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl) { return vnsra_wv_i16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vnsra_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { +vint16m1_t test_vnsra_wx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { return vnsra_wx_i16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnsra_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { +vint16m2_t test_vnsra_wv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl) { return vnsra_wv_i16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vnsra_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { +vint16m2_t test_vnsra_wx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { return vnsra_wx_i16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { +vint16m4_t test_vnsra_wv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl) { return vnsra_wv_i16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vnsra_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { +vint16m4_t test_vnsra_wx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { return vnsra_wx_i16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { +vint32mf2_t test_vnsra_wv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl) { return vnsra_wv_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vnsra_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { +vint32mf2_t test_vnsra_wx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { return vnsra_wx_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { +vint32m1_t 
test_vnsra_wv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl) { return vnsra_wv_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vnsra_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { +vint32m1_t test_vnsra_wx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { return vnsra_wx_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { +vint32m2_t test_vnsra_wv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl) { return vnsra_wv_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vnsra_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { +vint32m2_t test_vnsra_wx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { return vnsra_wx_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { +vint32m4_t test_vnsra_wv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl) { return vnsra_wv_i32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vnsra_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vnsra_wx_i32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnsra_wv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vnsra_wx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnsra_wv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vnsra_wx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnsra_wv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vnsra_wx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnsra_wv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vnsra_wx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnsra_wv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vnsra_wx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnsra_wv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vnsra_wx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnsra_wv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vnsra_wx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnsra_wv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vnsra_wx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnsra_wv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vnsra_wx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnsra_wv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vnsra_wx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnsra_wv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vnsra_wx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vnsra_wx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnsra_wv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vnsra_wx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnsra_wv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsra.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vnsra_wx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnsra_wv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vnsra_wv_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsra_wx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsra.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vnsra_wx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsra_wx_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c index 27643f4..0488a89 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnsrl.c @@ -270,276 +270,547 @@ vuint32m4_t test_vnsrl_wv_u32m4(vuint64m8_t op1, vuint32m4_t shift, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wx_u32m4(vuint64m8_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vnsrl_wx_u32m4 (vuint64m8_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32m4(op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { +vuint8mf8_t test_vnsrl_wv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl) { return vnsrl_wv_u8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vnsrl_wx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { +vuint8mf8_t test_vnsrl_wx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, 
size_t vl) { return vnsrl_wx_u8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { +vuint8mf4_t test_vnsrl_wv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl) { return vnsrl_wv_u8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vnsrl_wx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { +vuint8mf4_t test_vnsrl_wx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { return vnsrl_wx_u8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { +vuint8mf2_t test_vnsrl_wv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl) { return vnsrl_wv_u8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vnsrl_wx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { +vuint8mf2_t test_vnsrl_wx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { return vnsrl_wx_u8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t 
test_vnsrl_wv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { +vuint8m1_t test_vnsrl_wv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl) { return vnsrl_wv_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vnsrl_wx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { +vuint8m1_t test_vnsrl_wx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { return vnsrl_wx_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { +vuint8m2_t test_vnsrl_wv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl) { return vnsrl_wv_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vnsrl_wx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { +vuint8m2_t test_vnsrl_wx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { return vnsrl_wx_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { +vuint8m4_t test_vnsrl_wv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl) { return vnsrl_wv_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vnsrl_wx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { +vuint8m4_t test_vnsrl_wx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { return vnsrl_wx_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnsrl_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { +vuint16mf4_t test_vnsrl_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl) { return vnsrl_wv_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vnsrl_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { +vuint16mf4_t test_vnsrl_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { return vnsrl_wx_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnsrl_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { +vuint16mf2_t test_vnsrl_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl) { return vnsrl_wv_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vnsrl_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { +vuint16mf2_t test_vnsrl_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { return vnsrl_wx_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vnsrl_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { +vuint16m1_t test_vnsrl_wv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl) { return vnsrl_wv_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vnsrl_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { +vuint16m1_t test_vnsrl_wx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { return vnsrl_wx_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { +vuint16m2_t test_vnsrl_wv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl) { return vnsrl_wv_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vnsrl_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { +vuint16m2_t test_vnsrl_wx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { return vnsrl_wx_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, 
vuint16m4_t shift, size_t vl) { +vuint16m4_t test_vnsrl_wv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl) { return vnsrl_wv_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vnsrl_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { +vuint16m4_t test_vnsrl_wx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { return vnsrl_wx_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { +vuint32mf2_t test_vnsrl_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl) { return vnsrl_wv_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vnsrl_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { +vuint32mf2_t test_vnsrl_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { +vuint32m1_t test_vnsrl_wv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl) { return vnsrl_wv_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vnsrl_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { +vuint32m1_t test_vnsrl_wx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnsrl_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { +vuint32m2_t test_vnsrl_wv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl) { return vnsrl_wv_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vnsrl_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { +vuint32m2_t test_vnsrl_wx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { +vuint32m4_t test_vnsrl_wv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl) { return vnsrl_wv_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vnsrl_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vnsrl_wx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vnsrl_wx_u32m4_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnsrl_wv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i8.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vnsrl_wx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnsrl_wv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i8.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vnsrl_wx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnsrl_wv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i8.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vnsrl_wx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnsrl_wv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m1_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i8.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vnsrl_wx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnsrl_wv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i8.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vnsrl_wx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnsrl_wv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv32i8.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vnsrl_wx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnsrl_wv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vnsrl_wx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnsrl_wv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i16.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vnsrl_wx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnsrl_wv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i16.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vnsrl_wx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnsrl_wv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i16.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vnsrl_wx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnsrl_wv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vnsrl_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv16i16.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vnsrl_wx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv1i32.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vnsrl_wx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnsrl_wv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv2i32.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vnsrl_wx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnsrl_wv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv4i32.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vnsrl_wx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vnsrl_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnsrl_wv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vnsrl_wv_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vnsrl_wx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vnsrl.mask.nxv8i32.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vnsrl_wx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vnsrl_wx_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c index 34fe0fa..8cd5320 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vor.c @@ -792,798 +792,1591 @@ vuint64m8_t test_vor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vor_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vor_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vor_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vor_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t 
test_vor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vor_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vor_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vor_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vor_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vor_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vor_vx_i8m1_m(vbool8_t 
mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vor_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vor_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vor_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vor_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vor_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { 
+vint8m8_t test_vor_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
  return vor_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+vint8m8_t test_vor_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
  return vor_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vor_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+vint16mf4_t test_vor_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+vint16mf2_t test_vor_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
+vint16mf2_t test_vor_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+vint16m1_t test_vor_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vor_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
+vint16m1_t test_vor_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+vint16m2_t test_vor_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vor_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
+vint16m2_t test_vor_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+vint16m4_t test_vor_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vor_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
+vint16m4_t test_vor_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+vint16m8_t test_vor_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
  return vor_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
+vint16m8_t test_vor_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
  return vor_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+vint32mf2_t test_vor_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
+vint32mf2_t test_vor_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
  return vor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+vint32m1_t test_vor_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
+vint32m1_t test_vor_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
  return vor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+vint32m2_t test_vor_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vor_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
+vint32m2_t test_vor_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
  return vor_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+vint32m4_t test_vor_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vor_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
+vint32m4_t test_vor_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
  return vor_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+vint32m8_t test_vor_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
  return vor_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
+vint32m8_t test_vor_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
  return vor_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+vint64m1_t test_vor_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vor_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vor_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
  return vor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vor_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vor_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
  return vor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vor_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vor_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
  return vor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vor_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vor_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
  return vor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vor_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vor_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vor_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vor_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vor_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vor_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vor_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vor_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vor_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vor_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vor_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vor_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vor_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vor_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vor_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vor_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vor_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vor_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vor_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vor_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vor_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vor_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vor_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vor_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vor_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vor_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vor_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vor_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vor_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vor_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vor_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vor_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vor_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vor_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vor_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vor_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vor_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vor_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vor_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vor_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vor_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vor_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vor_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vor_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vor_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vor_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vor_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vor_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vor_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vor_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vor_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vor_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vor_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vor_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vor_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vor_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vor_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vor_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vor_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+ return vor_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vor_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vor_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vor_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vor_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vor_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vv_i16m2_mt (vbool8_t mask, vint16m2_t 
maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vor_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vor_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vor_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vor_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vor_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vor_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vor_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vor_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vor_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vor_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vor_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vor_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vor_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vor_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vor_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vor_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vor_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vor_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vor_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vor_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vor_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vor_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vor_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vor_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vor_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vor_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vor_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vor_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vor_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vor_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vv_i64m8_mt (vbool8_t mask, vint64m8_t 
maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vor_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vor_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vor_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vor_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vor_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vor_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vor_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vor_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vor_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vor_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vor_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vor_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vor_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
+// CHECK-RV64-LABEL: @test_vor_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vor_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vor_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vor_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vor_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vor_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vor_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vor_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vor_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vor_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vor_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vor_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vor_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vor_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vor_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vor_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vor_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vor_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vor_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vor_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vor_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vor_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vor_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vor_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vor_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vor_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vor_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vor_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vor_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vor_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vor_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vor_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vor_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vor_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vor_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vor_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vor_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vor_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vor_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vor_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vor_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vor_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
index 272b4bd..9d65d8f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c
@@ -214,6 +214,222 @@ vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
   return vreinterpret_v_u16m8_i16m8(src);
 }
 
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4(vfloat16mf4_t src) {
+  return vreinterpret_v_f16mf4_i16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2(vfloat16mf2_t src) {
+  return vreinterpret_v_f16mf2_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_f16m1_i16m1(vfloat16m1_t src) {
+  return vreinterpret_v_f16m1_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_f16m2_i16m2(vfloat16m2_t src) {
+  return vreinterpret_v_f16m2_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_f16m4_i16m4(vfloat16m4_t src) {
+  return vreinterpret_v_f16m4_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_f16m8_i16m8(vfloat16m8_t src) {
+  return vreinterpret_v_f16m8_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4(vfloat16mf4_t src) {
+  return vreinterpret_v_f16mf4_u16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2(vfloat16mf2_t src) {
+  return vreinterpret_v_f16mf2_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_f16m1_u16m1(vfloat16m1_t src) {
+  return vreinterpret_v_f16m1_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_f16m2_u16m2(vfloat16m2_t src) {
+  return vreinterpret_v_f16m2_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_f16m4_u16m4(vfloat16m4_t src) {
+  return vreinterpret_v_f16m4_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_f16m8_u16m8(vfloat16m8_t src) {
+  return vreinterpret_v_f16m8_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4(vint16mf4_t src) {
+  return vreinterpret_v_i16mf4_f16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2(vint16mf2_t src) {
+  return vreinterpret_v_i16mf2_f16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_i16m1_f16m1(vint16m1_t src) {
+  return vreinterpret_v_i16m1_f16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vreinterpret_v_i16m2_f16m2(vint16m2_t src) {
+  return vreinterpret_v_i16m2_f16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vreinterpret_v_i16m4_f16m4(vint16m4_t src) {
+  return vreinterpret_v_i16m4_f16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vreinterpret_v_i16m8_f16m8(vint16m8_t src) {
+  return vreinterpret_v_i16m8_f16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4(vuint16mf4_t src) {
+  return vreinterpret_v_u16mf4_f16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2(vuint16mf2_t src) {
+  return vreinterpret_v_u16mf2_f16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vreinterpret_v_u16m1_f16m1(vuint16m1_t src) {
+  return vreinterpret_v_u16m1_f16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vreinterpret_v_u16m2_f16m2(vuint16m2_t src) {
+  return vreinterpret_v_u16m2_f16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vreinterpret_v_u16m4_f16m4(vuint16m4_t src) {
+  return vreinterpret_v_u16m4_f16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m8_t test_vreinterpret_v_u16m8_f16m8(vuint16m8_t src) {
+  return vreinterpret_v_u16m8_f16m8(src);
+}
+
 // CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: ret [[SRC:%.*]]
@@ -1686,222 +1902,7 @@ vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) {
+vuint32m8_t test_vreinterpret_v_u64m8_u32m8 (vuint64m8_t src) {
   return vreinterpret_v_u64m8_u32m8(src);
 }
 
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vreinterpret_v_f16mf4_i16mf4 (vfloat16mf4_t src) {
-  return vreinterpret_v_f16mf4_i16mf4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vreinterpret_v_f16mf2_i16mf2 (vfloat16mf2_t src) {
-  return vreinterpret_v_f16mf2_i16mf2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vreinterpret_v_f16m1_i16m1 (vfloat16m1_t src) {
-  return vreinterpret_v_f16m1_i16m1(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vreinterpret_v_f16m2_i16m2 (vfloat16m2_t src) {
-  return vreinterpret_v_f16m2_i16m2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vreinterpret_v_f16m4_i16m4 (vfloat16m4_t src) {
-  return vreinterpret_v_f16m4_i16m4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_i16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m8_t test_vreinterpret_v_f16m8_i16m8 (vfloat16m8_t src) {
-  return vreinterpret_v_f16m8_i16m8(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf4_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vreinterpret_v_f16mf4_u16mf4 (vfloat16mf4_t src) {
-  return vreinterpret_v_f16mf4_u16mf4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16mf2_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vreinterpret_v_f16mf2_u16mf2 (vfloat16mf2_t src) {
-  return vreinterpret_v_f16mf2_u16mf2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m1_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vreinterpret_v_f16m1_u16m1 (vfloat16m1_t src) {
-  return vreinterpret_v_f16m1_u16m1(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m2_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vreinterpret_v_f16m2_u16m2 (vfloat16m2_t src) {
-  return vreinterpret_v_f16m2_u16m2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m4_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vreinterpret_v_f16m4_u16m4 (vfloat16m4_t src) {
-  return vreinterpret_v_f16m4_u16m4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_f16m8_u16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m8_t test_vreinterpret_v_f16m8_u16m8 (vfloat16m8_t src) {
-  return vreinterpret_v_f16m8_u16m8(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vreinterpret_v_i16mf4_f16mf4 (vint16mf4_t src) {
-  return vreinterpret_v_i16mf4_f16mf4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vreinterpret_v_i16mf2_f16mf2 (vint16mf2_t src) {
-  return vreinterpret_v_i16mf2_f16mf2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vreinterpret_v_i16m1_f16m1 (vint16m1_t src) {
-  return vreinterpret_v_i16m1_f16m1(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vreinterpret_v_i16m2_f16m2 (vint16m2_t src) {
-  return vreinterpret_v_i16m2_f16m2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vreinterpret_v_i16m4_f16m4 (vint16m4_t src) {
-  return vreinterpret_v_i16m4_f16m4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_f16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m8_t test_vreinterpret_v_i16m8_f16m8 (vint16m8_t src) {
-  return vreinterpret_v_i16m8_f16m8(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vreinterpret_v_u16mf4_f16mf4 (vuint16mf4_t src) {
-  return vreinterpret_v_u16mf4_f16mf4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vreinterpret_v_u16mf2_f16mf2 (vuint16mf2_t src) {
-  return vreinterpret_v_u16mf2_f16mf2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vreinterpret_v_u16m1_f16m1 (vuint16m1_t src) {
-  return vreinterpret_v_u16m1_f16m1(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vreinterpret_v_u16m2_f16m2 (vuint16m2_t src) {
-  return vreinterpret_v_u16m2_f16m2(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vreinterpret_v_u16m4_f16m4 (vuint16m4_t src) {
-  return vreinterpret_v_u16m4_f16m4(src);
-}
-
-// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_f16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m8_t test_vreinterpret_v_u16m8_f16m8 (vuint16m8_t src) {
-  return vreinterpret_v_u16m8_f16m8(src);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
index 9728285..6c0da53 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrem.c
@@ -792,798 +792,1591 @@ vuint64m8_t test_vremu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vremu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vremu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vremu_vx_u64m8(op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vrem_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vrem_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vrem_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vrem_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+vint8mf8_t test_vrem_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vrem_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vrem_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vrem_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vrem_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+vint8mf4_t test_vrem_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vrem_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+vint8mf2_t test_vrem_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
   return vrem_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf2_t test_vrem_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
+vint8mf2_t test_vrem_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vrem_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+vint8m1_t test_vrem_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
   return vrem_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vrem_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
+vint8m1_t test_vrem_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m1_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vrem_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+vint8m2_t test_vrem_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
   return vrem_vv_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m2_t test_vrem_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
+vint8m2_t test_vrem_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vrem_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+vint8m4_t test_vrem_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
   return vrem_vv_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m4_t test_vrem_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
+vint8m4_t test_vrem_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m8_t test_vrem_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+vint8m8_t test_vrem_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
   return vrem_vv_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m8_t test_vrem_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
+vint8m8_t test_vrem_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
   return vrem_vx_i8m8_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vrem_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+vint16mf4_t test_vrem_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vrem_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vrem_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
+vint16mf4_t test_vrem_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
   return vrem_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf2_t test_vrem_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1,
vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vrem_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vrem_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrem_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vrem_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vrem_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vrem_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vrem_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrem_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vrem_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vrem_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrem_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vrem_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vrem_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t 
test_vrem_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vrem_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vrem_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vrem_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vrem_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrem_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vrem_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vrem_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vrem_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vrem_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrem_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vrem_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vrem_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vrem_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vrem_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrem_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vrem_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vrem_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vrem_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vrem_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrem_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vrem_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vrem_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vrem_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vrem_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrem_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vrem_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vrem_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vrem_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vrem_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrem_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vrem_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vrem_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vrem_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vrem_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrem_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vrem_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vrem_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vrem_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vrem_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrem_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vrem_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vrem_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vrem_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vrem_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrem_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vrem_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vrem_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vrem_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vrem_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrem_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vrem_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vrem_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrem_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vrem_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vrem_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrem_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrem_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vrem_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vrem_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vremu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint8mf8_t test_vremu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vremu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vremu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vremu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vremu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint8mf4_t test_vremu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vremu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vremu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vremu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint8mf2_t test_vremu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vremu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vremu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vremu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint8m1_t test_vremu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vremu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vremu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vremu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint8m2_t test_vremu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vremu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vremu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vremu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vremu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vremu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vremu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vremu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u8m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vremu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vremu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vremu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vremu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vremu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vuint16mf4_t test_vremu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vremu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vremu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint16mf4_t test_vremu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vuint16mf2_t test_vremu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, 
size_t vl) { return vremu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vremu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint16mf2_t test_vremu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vuint16m1_t test_vremu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vremu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vremu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vremu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vuint16m2_t test_vremu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vremu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vremu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, 
size_t vl) { +vuint16m2_t test_vremu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vuint16m4_t test_vremu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vremu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vremu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vremu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { +vuint16m8_t test_vremu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vremu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vremu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vremu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vremu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint32mf2_t test_vremu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vuint32mf2_t test_vremu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vremu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vremu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint32mf2_t test_vremu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vremu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vuint32m1_t test_vremu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vremu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vremu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vremu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vremu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { +vuint32m2_t test_vremu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vremu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vremu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vremu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vremu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { +vuint32m4_t test_vremu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vremu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vremu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vremu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vremu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vremu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { +vuint32m8_t test_vremu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vremu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vremu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vremu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vremu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { +vuint64m1_t test_vremu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vremu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vremu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vremu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vremu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { +vuint64m2_t test_vremu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vremu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vremu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vremu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vremu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vremu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vremu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { +vuint64m4_t test_vremu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vremu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vremu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vremu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vremu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vremu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vremu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vremu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vremu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vremu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vremu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vremu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vrem_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vrem_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrem_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vrem_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vrem_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrem_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vrem_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
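+  // The trailing argument selects the tail policy at the call site; the
+  // "ta" parameter is unused in these generated tests, which always pass
+  // VE_TAIL_AGNOSTIC and therefore expect the "i64 1" policy operand in
+  // the CHECK lines above. A minimal sketch, assuming the header's
+  // counterpart macro is named VE_TAIL_UNDISTURBED with encoding 0:
+  //   vint8mf4_t r = vrem_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl,
+  //                                   VE_TAIL_UNDISTURBED);
+  // This would keep maskedoff's tail elements and lower with "i64 0".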
return vrem_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrem_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrem_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrem_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrem_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrem.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrem_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrem_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrem_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrem_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint16mf2_t test_vrem_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrem_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrem_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrem_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t 
ta) { + return vrem_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrem_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrem_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrem_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrem_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vrem_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrem_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrem_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrem_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv2i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrem_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrem_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vrem_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrem_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrem.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrem_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrem_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vremu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
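+// Note: the trailing i64 1 on the masked intrinsic calls above is the new
+// tail-policy operand. Under this patch's scheme, 1 (VE_TAIL_AGNOSTIC) leaves
+// the tail elements unspecified, while 0 (presumably VE_TAIL_UNDISTURBED)
+// would instead keep the tail elements taken from maskedoff.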
+vuint8mf4_t test_vremu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vremu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vremu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vremu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vremu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { 
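+ // Note: 'ta' is accepted but unused here; the call passes the
+ // VE_TAIL_AGNOSTIC constant instead, presumably so the CHECK lines above
+ // can match a literal policy operand (i64 1) rather than a runtime value.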
+ return vremu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vremu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vremu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vremu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vremu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vremu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vremu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vremu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vremu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vremu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vremu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vremu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vremu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vremu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vremu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vremu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vremu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vremu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vremu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vremu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vremu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+ return vremu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vremu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vremu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vremu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+ return vremu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vremu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vremu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vremu_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vremu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+ return vremu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vremu_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vremu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vremu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vremu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
index 9b2b01b..081bc79 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrgather.c
@@ -837,6 +837,114 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
 return vrgather_vx_u64m8(op1, index, vl);
 }
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vv_f16mf4(vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+ return vrgather_vv_f16mf4(op1, index, vl);
+}
+
+//
CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vx_f16mf4(vfloat16mf4_t op1, size_t index, size_t vl) { + return vrgather_vx_f16mf4(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vv_f16mf2(vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { + return vrgather_vv_f16mf2(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vx_f16mf2(vfloat16mf2_t op1, size_t index, size_t vl) { + return vrgather_vx_f16mf2(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vv_f16m1(vfloat16m1_t op1, vuint16m1_t index, size_t vl) { + return vrgather_vv_f16m1(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vx_f16m1(vfloat16m1_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m1(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vv_f16m2(vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return vrgather_vv_f16m2(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vx_f16m2(vfloat16m2_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m2(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vv_f16m4(vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return vrgather_vv_f16m4(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vx_f16m4(vfloat16m4_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m4(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vv_f16m8(vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return vrgather_vv_f16m8(op1, index, vl); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vx_f16m8(vfloat16m8_t op1, size_t index, size_t vl) { + return vrgather_vx_f16m8(op1, index, vl); +} + // CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f32.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) @@ -1511,2025 +1619,3157 @@ vfloat64m4_t test_vrgatherei16_vv_f64m4(vfloat64m4_t op1, vuint16m1_t op2, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv8f64.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8(vfloat64m8_t op1, vuint16m2_t op2, - size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8 (vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_f64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vuint8mf8_t index, - size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl) { return vrgather_vv_i8mf8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgather_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, size_t index, size_t vl) { +vint8mf8_t test_vrgather_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl) { return vrgather_vx_i8mf8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vuint8mf4_t index, - size_t vl) { +vint8mf4_t test_vrgather_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl) { return vrgather_vv_i8mf4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgather_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, size_t index, size_t vl) { +vint8mf4_t test_vrgather_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl) { return vrgather_vx_i8mf4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vuint8mf2_t index, - size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl) { return vrgather_vv_i8mf2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgather_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, size_t index, size_t vl) { +vint8mf2_t test_vrgather_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl) { return vrgather_vx_i8mf2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vuint8m1_t index, size_t vl) { +vint8m1_t test_vrgather_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { return vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, size_t index, size_t vl) { +vint8m1_t test_vrgather_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t 
index, size_t vl) { return vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vuint8m2_t index, size_t vl) { +vint8m2_t test_vrgather_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { return vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, size_t index, size_t vl) { +vint8m2_t test_vrgather_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { return vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vuint8m4_t index, size_t vl) { +vint8m4_t test_vrgather_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { return vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, size_t index, size_t vl) { +vint8m4_t test_vrgather_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { return vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, 
vuint8m8_t index, size_t vl) { +vint8m8_t test_vrgather_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { return vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, size_t index, size_t vl) { +vint8m8_t test_vrgather_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { return vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vuint16mf4_t index, - size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl) { return vrgather_vv_i16mf4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgather_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, size_t index, - size_t vl) { +vint16mf4_t test_vrgather_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl) { return vrgather_vx_i16mf4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgather_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vuint16mf2_t index, - size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl) { return vrgather_vv_i16mf2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16mf2_t test_vrgather_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
-                                      vint16mf2_t op1, size_t index,
-                                      size_t vl) {
+vint16mf2_t test_vrgather_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_i16mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                    vint16m1_t op1, vuint16m1_t index,
-                                    size_t vl) {
+vint16m1_t test_vrgather_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) {
  return vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                    vint16m1_t op1, size_t index, size_t vl) {
+vint16m1_t test_vrgather_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                    vint16m2_t op1, vuint16m2_t index,
-                                    size_t vl) {
+vint16m2_t test_vrgather_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) {
  return vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                    vint16m2_t op1, size_t index, size_t vl) {
+vint16m2_t test_vrgather_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                    vint16m4_t op1, vuint16m4_t index,
-                                    size_t vl) {
+vint16m4_t test_vrgather_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) {
  return vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                    vint16m4_t op1, size_t index, size_t vl) {
+vint16m4_t test_vrgather_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                    vint16m8_t op1, vuint16m8_t index,
-                                    size_t vl) {
+vint16m8_t test_vrgather_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) {
  return vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                    vint16m8_t op1, size_t index, size_t vl) {
+vint16m8_t test_vrgather_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32mf2_t test_vrgather_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                      vint32mf2_t op1, vuint32mf2_t index,
-                                      size_t vl) {
+vint32mf2_t test_vrgather_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl) {
  return vrgather_vv_i32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32mf2_t test_vrgather_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                      vint32mf2_t op1, size_t index,
-                                      size_t vl) {
+vint32mf2_t test_vrgather_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_i32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                    vint32m1_t op1, vuint32m1_t index,
-                                    size_t vl) {
+vint32m1_t test_vrgather_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) {
  return vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                    vint32m1_t op1, size_t index, size_t vl) {
+vint32m1_t test_vrgather_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                    vint32m2_t op1, vuint32m2_t index,
-                                    size_t vl) {
+vint32m2_t test_vrgather_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) {
  return vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                    vint32m2_t op1, size_t index, size_t vl) {
+vint32m2_t test_vrgather_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                    vint32m4_t op1, vuint32m4_t index,
-                                    size_t vl) {
+vint32m4_t test_vrgather_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) {
  return vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                    vint32m4_t op1, size_t index, size_t vl) {
+vint32m4_t test_vrgather_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-                                    vint32m8_t op1, vuint32m8_t index,
-                                    size_t vl) {
+vint32m8_t test_vrgather_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) {
  return vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-                                    vint32m8_t op1, size_t index, size_t vl) {
+vint32m8_t test_vrgather_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_i32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-                                    vint64m1_t op1, vuint64m1_t index,
-                                    size_t vl) {
+vint64m1_t test_vrgather_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) {
  return vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-                                    vint64m1_t op1, size_t index, size_t vl) {
+vint64m1_t test_vrgather_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-                                    vint64m2_t op1, vuint64m2_t index,
-                                    size_t vl) {
+vint64m2_t test_vrgather_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) {
  return vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-                                    vint64m2_t op1, size_t index, size_t vl) {
+vint64m2_t test_vrgather_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                    vint64m4_t op1, vuint64m4_t index,
-                                    size_t vl) {
+vint64m4_t test_vrgather_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) {
  return vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                    vint64m4_t op1, size_t index, size_t vl) {
+vint64m4_t test_vrgather_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                    vint64m8_t op1, vuint64m8_t index,
-                                    size_t vl) {
+vint64m8_t test_vrgather_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) {
  return vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                    vint64m8_t op1, size_t index, size_t vl) {
+vint64m8_t test_vrgather_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf8_t test_vrgather_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                     vuint8mf8_t op1, vuint8mf8_t index,
-                                     size_t vl) {
+vuint8mf8_t test_vrgather_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl) {
  return vrgather_vv_u8mf8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf8_t test_vrgather_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                     vuint8mf8_t op1, size_t index,
-                                     size_t vl) {
+vuint8mf8_t test_vrgather_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8mf8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf4_t test_vrgather_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                     vuint8mf4_t op1, vuint8mf4_t index,
-                                     size_t vl) {
+vuint8mf4_t test_vrgather_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, size_t vl) {
  return vrgather_vv_u8mf4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf4_t test_vrgather_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                     vuint8mf4_t op1, size_t index, size_t vl) {
+vuint8mf4_t test_vrgather_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8mf4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf2_t test_vrgather_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                     vuint8mf2_t op1, vuint8mf2_t index,
-                                     size_t vl) {
+vuint8mf2_t test_vrgather_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl) {
  return vrgather_vv_u8mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8mf2_t test_vrgather_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                     vuint8mf2_t op1, size_t index, size_t vl) {
+vuint8mf2_t test_vrgather_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                   vuint8m1_t op1, vuint8m1_t index,
-                                   size_t vl) {
+vuint8m1_t test_vrgather_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) {
  return vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                   vuint8m1_t op1, size_t index, size_t vl) {
+vuint8m1_t test_vrgather_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                   vuint8m2_t op1, vuint8m2_t index,
-                                   size_t vl) {
+vuint8m2_t test_vrgather_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) {
  return vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                   vuint8m2_t op1, size_t index, size_t vl) {
+vuint8m2_t test_vrgather_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                   vuint8m4_t op1, vuint8m4_t index,
-                                   size_t vl) {
+vuint8m4_t test_vrgather_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) {
  return vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                   vuint8m4_t op1, size_t index, size_t vl) {
+vuint8m4_t test_vrgather_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                   vuint8m8_t op1, vuint8m8_t index,
-                                   size_t vl) {
+vuint8m8_t test_vrgather_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) {
  return vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                   vuint8m8_t op1, size_t index, size_t vl) {
+vuint8m8_t test_vrgather_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16mf4_t test_vrgather_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                       vuint16mf4_t op1, vuint16mf4_t index,
-                                       size_t vl) {
+vuint16mf4_t test_vrgather_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl) {
  return vrgather_vv_u16mf4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16mf4_t test_vrgather_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                       vuint16mf4_t op1, size_t index,
-                                       size_t vl) {
+vuint16mf4_t test_vrgather_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16mf4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16mf2_t test_vrgather_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                       vuint16mf2_t op1, vuint16mf2_t index,
-                                       size_t vl) {
+vuint16mf2_t test_vrgather_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl) {
  return vrgather_vv_u16mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16mf2_t test_vrgather_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                       vuint16mf2_t op1, size_t index,
-                                       size_t vl) {
+vuint16mf2_t test_vrgather_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                     vuint16m1_t op1, vuint16m1_t index,
-                                     size_t vl) {
+vuint16m1_t test_vrgather_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) {
  return vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                     vuint16m1_t op1, size_t index, size_t vl) {
+vuint16m1_t test_vrgather_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                     vuint16m2_t op1, vuint16m2_t index,
-                                     size_t vl) {
+vuint16m2_t test_vrgather_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) {
  return vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                     vuint16m2_t op1, size_t index, size_t vl) {
+vuint16m2_t test_vrgather_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                     vuint16m4_t op1, vuint16m4_t index,
-                                     size_t vl) {
+vuint16m4_t test_vrgather_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) {
  return vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                     vuint16m4_t op1, size_t index, size_t vl) {
+vuint16m4_t test_vrgather_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                     vuint16m8_t op1, vuint16m8_t index,
-                                     size_t vl) {
+vuint16m8_t test_vrgather_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) {
  return vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                     vuint16m8_t op1, size_t index, size_t vl) {
+vuint16m8_t test_vrgather_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32mf2_t test_vrgather_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                       vuint32mf2_t op1, vuint32mf2_t index,
-                                       size_t vl) {
+vuint32mf2_t test_vrgather_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl) {
  return vrgather_vv_u32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32mf2_t test_vrgather_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                       vuint32mf2_t op1, size_t index,
-                                       size_t vl) {
+vuint32mf2_t test_vrgather_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                     vuint32m1_t op1, vuint32m1_t index,
-                                     size_t vl) {
+vuint32m1_t test_vrgather_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) {
  return vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                     vuint32m1_t op1, size_t index, size_t vl) {
+vuint32m1_t test_vrgather_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                     vuint32m2_t op1, vuint32m2_t index,
-                                     size_t vl) {
+vuint32m2_t test_vrgather_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) {
  return vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                     vuint32m2_t op1, size_t index, size_t vl) {
+vuint32m2_t test_vrgather_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                     vuint32m4_t op1, vuint32m4_t index,
-                                     size_t vl) {
+vuint32m4_t test_vrgather_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) {
  return vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                     vuint32m4_t op1, size_t index, size_t vl) {
+vuint32m4_t test_vrgather_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                     vuint32m8_t op1, vuint32m8_t index,
-                                     size_t vl) {
+vuint32m8_t test_vrgather_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) {
  return vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                     vuint32m8_t op1, size_t index, size_t vl) {
+vuint32m8_t test_vrgather_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                     vuint64m1_t op1, vuint64m1_t index,
-                                     size_t vl) {
+vuint64m1_t test_vrgather_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) {
  return vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                     vuint64m1_t op1, size_t index, size_t vl) {
+vuint64m1_t test_vrgather_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                     vuint64m2_t op1, vuint64m2_t index,
-                                     size_t vl) {
+vuint64m2_t test_vrgather_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) {
  return vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                     vuint64m2_t op1, size_t index, size_t vl) {
+vuint64m2_t test_vrgather_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                     vuint64m4_t op1, vuint64m4_t index,
-                                     size_t vl) {
+vuint64m4_t test_vrgather_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) {
  return vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                     vuint64m4_t op1, size_t index, size_t vl) {
+vuint64m4_t test_vrgather_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                     vuint64m8_t op1, vuint64m8_t index,
-                                     size_t vl) {
+vuint64m8_t test_vrgather_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) {
  return vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                     vuint64m8_t op1, size_t index, size_t vl) {
+vuint64m8_t test_vrgather_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl);
}

+//
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) {
+  return vrgather_vv_f16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf4_t test_vrgather_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) {
+  return vrgather_vv_f16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16mf2_t test_vrgather_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16mf2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+  return vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m1_t test_vrgather_vx_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
+  return vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m2_t test_vrgather_vx_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
+  return vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m4_t test_vrgather_vx_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
+  return vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat16m8_t test_vrgather_vx_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
+  return vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl);
+}
+
// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32mf2_t test_vrgather_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                        vfloat32mf2_t op1, vuint32mf2_t index,
-                                        size_t vl) {
+vfloat32mf2_t test_vrgather_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl) {
  return vrgather_vv_f32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32mf2_t test_vrgather_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff,
-                                        vfloat32mf2_t op1, size_t index,
-                                        size_t vl) {
+vfloat32mf2_t test_vrgather_vx_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl) {
  return vrgather_vx_f32mf2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                      vfloat32m1_t op1, vuint32m1_t index,
-                                      size_t vl) {
+vfloat32m1_t test_vrgather_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) {
  return vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff,
-                                      vfloat32m1_t op1, size_t index,
-                                      size_t vl) {
+vfloat32m1_t test_vrgather_vx_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) {
  return vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                      vfloat32m2_t op1, vuint32m2_t index,
-                                      size_t vl) {
+vfloat32m2_t test_vrgather_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) {
  return vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff,
-                                      vfloat32m2_t op1, size_t index,
-                                      size_t vl) {
+vfloat32m2_t test_vrgather_vx_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) {
  return vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-                                      vfloat32m4_t op1, vuint32m4_t index,
-                                      size_t vl) {
+vfloat32m4_t test_vrgather_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) {
  return vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff,
-                                      vfloat32m4_t op1, size_t index,
-                                      size_t vl) {
+vfloat32m4_t test_vrgather_vx_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) {
  return vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-                                      vfloat32m8_t op1, vuint32m8_t index,
-                                      size_t vl) {
+vfloat32m8_t test_vrgather_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) {
  return vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff,
-                                      vfloat32m8_t op1, size_t index,
-                                      size_t vl) {
+vfloat32m8_t test_vrgather_vx_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) {
  return vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl);
}

// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_m(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT:    ret [[TMP0]]
//
-vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff,
-                                      vfloat64m1_t op1, vuint64m1_t index,
-                                      size_t vl) {
+vfloat64m1_t test_vrgather_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) {
  return vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl);
} // CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, - vfloat64m1_t op1, size_t index, - size_t vl) { +vfloat64m1_t test_vrgather_vx_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { return vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, vuint64m2_t index, - size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { return vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, - vfloat64m2_t op1, size_t index, - size_t vl) { +vfloat64m2_t test_vrgather_vx_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { return vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, - vfloat64m4_t op1, vuint64m4_t index, - size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { return vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, 
vfloat64m4_t maskedoff, - vfloat64m4_t op1, size_t index, - size_t vl) { +vfloat64m4_t test_vrgather_vx_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { return vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vuint64m8_t index, - size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { return vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, size_t index, - size_t vl) { +vfloat64m8_t test_vrgather_vx_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { return vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrgatherei16_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrgatherei16_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
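// Editor's note (illustrative sketch, not part of the generated checks): in
// each masked hunk in this file, the only functional change is the trailing
// `i64 1` operand on the @llvm.riscv.* call, i.e. the new tail-policy
// argument, where 1 selects tail agnostic; the rest of the churn is just
// reflowed test signatures. The C-level signature of the plain _m builtins
// is unchanged, so existing callers keep working and simply lower with that
// default policy. A minimal use, borrowing the i8mf2 signature from the
// tests around this point (assumes the usual <riscv_vector.h> include of
// these test files):
vint8mf2_t gather_masked(vbool16_t mask, vint8mf2_t maskedoff,
                         vint8mf2_t op1, vuint16m1_t op2, size_t vl) {
  // Lowers to @llvm.riscv.vrgatherei16.vv.mask.* with a trailing `i64 1`
  // (tail agnostic) policy operand, as the updated CHECK lines verify.
  return vrgatherei16_vv_i8mf2_m(mask, maskedoff, op1, op2, vl);
}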
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrgatherei16_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vuint16m1_t op2, - size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgatherei16_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vuint16m2_t op2, - size_t vl) { +vint8m1_t test_vrgatherei16_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgatherei16_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vuint16m4_t op2, - size_t vl) { +vint8m2_t test_vrgatherei16_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgatherei16_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vuint16m8_t op2, - size_t vl) { +vint8m4_t test_vrgatherei16_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrgatherei16_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_i16mf4_m(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrgatherei16_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vrgatherei16_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgatherei16_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgatherei16_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgatherei16_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t 
test_vrgatherei16_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrgatherei16_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgatherei16_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgatherei16_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgatherei16_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgatherei16_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgatherei16_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgatherei16_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgatherei16_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgatherei16_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, 
vint64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrgatherei16_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t op1, vuint16mf4_t op2, - size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrgatherei16_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t op1, vuint16mf2_t op2, - size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrgatherei16_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, vuint16m1_t op2, - size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgatherei16_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint16m2_t op2, - size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgatherei16_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint16m4_t op2, - size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgatherei16_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint16m8_t op2, - size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrgatherei16_vv_u16mf4_m(vbool64_t mask, - vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrgatherei16_vv_u16mf2_m(vbool32_t mask, - vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgatherei16_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgatherei16_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgatherei16_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgatherei16_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vrgatherei16_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrgatherei16_vv_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgatherei16_vv_u32m1_m(vbool32_t mask, 
vuint32m1_t maskedoff, - vuint32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgatherei16_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint16m1_t op2, - size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgatherei16_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint16m2_t op2, - size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgatherei16_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint16m4_t op2, - size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgatherei16_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgatherei16_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgatherei16_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint16m1_t op2, - size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgatherei16_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint16m2_t op2, - size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { + return vrgatherei16_vv_f16mf4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { + return vrgatherei16_vv_f16mf2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgatherei16_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { + return 
vrgatherei16_vv_f16m1_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgatherei16_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { + return vrgatherei16_vv_f16m2_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgatherei16_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { + return vrgatherei16_vv_f16m4_m(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgatherei16_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { + return vrgatherei16_vv_f16m8_m(mask, maskedoff, op1, op2, vl); +} + // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m(vbool64_t mask, - vfloat32mf2_t maskedoff, - vfloat32mf2_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_f32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgatherei16_vv_f32m1_m(vbool32_t mask, - vfloat32m1_t maskedoff, - vfloat32m1_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m2_t test_vrgatherei16_vv_f32m2_m(vbool16_t mask, - vfloat32m2_t maskedoff, - vfloat32m2_t op1, vuint16m1_t op2, - size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgatherei16_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, - vfloat32m4_t op1, vuint16m2_t op2, - size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgatherei16_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, - vfloat32m8_t op1, vuint16m4_t op2, - size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl) { return vrgatherei16_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgatherei16_vv_f64m1_m(vbool64_t mask, - vfloat64m1_t maskedoff, - vfloat64m1_t op1, vuint16mf4_t op2, - size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl) { return vrgatherei16_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgatherei16_vv_f64m2_m(vbool32_t mask, - vfloat64m2_t maskedoff, - vfloat64m2_t op1, vuint16mf2_t op2, - size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl) { return vrgatherei16_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgatherei16_vv_f64m4_m(vbool16_t mask, - vfloat64m4_t maskedoff, - vfloat64m4_t op1, vuint16m1_t op2, - size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl) { return vrgatherei16_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgatherei16_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, - vfloat64m8_t op1, vuint16m2_t op2, - size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl) { return vrgatherei16_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv1f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4(op1, index, vl); +vint8mf8_t test_vrgather_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8mf8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv1f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vx_f16mf4 (vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4(op1, index, vl); +vint8mf8_t test_vrgather_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8mf8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv2f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2(op1, index, vl); +vint8mf4_t test_vrgather_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv2f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vx_f16mf2 (vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2(op1, index, vl); +vint8mf4_t test_vrgather_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv4f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1(op1, index, vl); +vint8mf2_t test_vrgather_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv4f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vx_f16m1 (vfloat16m1_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m1(op1, index, vl); +vint8mf2_t test_vrgather_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv8f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return vrgather_vv_f16m2(op1, index, vl); 
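// Editor's note (illustrative sketch): the _mt builtins introduced in the
// surrounding hunks expose the tail policy as an explicit final argument.
// These tests always pass VE_TAIL_AGNOSTIC, matching the `i64 1` checked in
// the IR; a caller who needs the tail elements of the destination preserved
// would pass the tail-undisturbed policy instead. The spelling
// VE_TAIL_UNDISTURBED below is an assumption for illustration -- it does not
// appear in this hunk. Sketch, reusing the i8m1 shape of the test just
// below:
vint8m1_t gather_keep_tail(vbool8_t mask, vint8m1_t maskedoff,
                           vint8m1_t op1, vuint8m1_t index, size_t vl) {
  // VE_TAIL_UNDISTURBED is an assumed name for the tail-undisturbed policy;
  // only VE_TAIL_AGNOSTIC is exercised by the checks in this file.
  return vrgather_vv_i8m1_mt(mask, maskedoff, op1, index, vl,
                             VE_TAIL_UNDISTURBED);
}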
+vint8m1_t test_vrgather_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv8f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vx_f16m2 (vfloat16m2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m2(op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv16f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return vrgather_vv_f16m4(op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv16f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vx_f16m4 (vfloat16m4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m4(op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.nxv32f16.i64( [[OP1:%.*]], [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return vrgather_vv_f16m8(op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: 
@test_vrgather_vx_f16m8( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.nxv32f16.i64( [[OP1:%.*]], i64 [[INDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vx_f16m8 (vfloat16m8_t op1, size_t index, size_t vl) { - return vrgather_vx_f16m8(op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4( +// CHECK-RV64-LABEL: @test_vrgather_vv_i8m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv1f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgatherei16_vv_f16mf4 (vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) { - return vrgatherei16_vv_f16mf4(op1, op2, vl); +vint8m8_t test_vrgather_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i8m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2( +// CHECK-RV64-LABEL: @test_vrgather_vx_i8m8_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv2f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgatherei16_vv_f16mf2 (vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) { - return vrgatherei16_vv_f16mf2(op1, op2, vl); +vint8m8_t test_vrgather_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i8m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1( +// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv4f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgatherei16_vv_f16m1 (vfloat16m1_t op1, vuint16m1_t op2, size_t vl) { - return vrgatherei16_vv_f16m1(op1, op2, vl); +vint16mf4_t test_vrgather_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2( +// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.nxv8f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgatherei16_vv_f16m2 (vfloat16m2_t op1, vuint16m2_t op2, size_t vl) { - return vrgatherei16_vv_f16m2(op1, op2, vl); +vint16mf4_t test_vrgather_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4( +// CHECK-RV64-LABEL: @test_vrgather_vv_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv16f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgatherei16_vv_f16m4 (vfloat16m4_t op1, vuint16m4_t op2, size_t vl) { - return vrgatherei16_vv_f16m4(op1, op2, vl); +vint16mf2_t test_vrgather_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8( +// CHECK-RV64-LABEL: @test_vrgather_vx_i16mf2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.nxv32f16.i64( [[OP1:%.*]], [[OP2:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgatherei16_vv_f16m8 (vfloat16m8_t op1, vuint16m8_t op2, size_t vl) { - return vrgatherei16_vv_f16m8(op1, op2, vl); +vint16mf2_t test_vrgather_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_m( +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl) { - return vrgather_vv_f16mf4_m(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_m( +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m1_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vrgather_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf4_m(mask, maskedoff, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_m( +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl) { - return vrgather_vv_f16mf2_m(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_m( +// CHECK-RV64-LABEL: @test_vrgather_vx_i16m2_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vrgather_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl) { - return vrgather_vx_f16mf2_m(mask, maskedoff, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } -// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_m( +// CHECK-RV64-LABEL: @test_vrgather_vv_i16m4_mt( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vrgather_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) { - return vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); } 

-// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vrgather_vx_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
- return vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl);
+vint16m4_t test_vrgather_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vrgather_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) {
- return vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i16m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vrgather_vx_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) {
- return vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl);
+vint16m8_t test_vrgather_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vrgather_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) {
- return vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32mf2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vrgather_vx_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) {
- return vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl);
+vint32mf2_t test_vrgather_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vrgather_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) {
- return vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m1_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vrgather_vx_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) {
- return vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl);
+vint32m1_t test_vrgather_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf4_t test_vrgatherei16_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl) {
- return vrgatherei16_vv_f16mf4_m(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrgather_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m2_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16mf2_t test_vrgatherei16_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl) {
- return vrgatherei16_vv_f16mf2_m(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vrgather_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vrgatherei16_vv_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl) {
- return vrgatherei16_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrgather_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m4_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m2_t test_vrgatherei16_vv_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl) {
- return vrgatherei16_vv_f16m2_m(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vrgather_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_m(
+// CHECK-RV64-LABEL: @test_vrgather_vv_i32m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m4_t test_vrgatherei16_vv_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl) {
- return vrgatherei16_vv_f16m4_m(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrgather_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
 }

-// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_m(
+// CHECK-RV64-LABEL: @test_vrgather_vx_i32m8_mt(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m8_t test_vrgatherei16_vv_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl) {
- return vrgatherei16_vv_f16m8_m(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vrgather_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vrgather_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vrgather_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl, uint8_t ta) {
+ return vrgather_vx_i64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vrgather_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl, uint8_t ta) {
+ return vrgather_vv_i64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrgather_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vrgather_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t index,
size_t vl, uint8_t ta) { + return vrgather_vx_i64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgather_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgather_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgather_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_i64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgather_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_i64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgather_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8mf8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgather_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8mf8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgather_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t index, 
size_t vl, uint8_t ta) { + return vrgather_vv_u8mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgather_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgather_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgather_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgather_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgather_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgather_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgather_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl, uint8_t ta) 
{ + return vrgather_vx_u8m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgather_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgather_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrgather_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u8m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrgather_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u8m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgather_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgather_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgather_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t index, size_t vl, uint8_t ta) 
{ + return vrgather_vv_u16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgather_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgather_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgather_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgather_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgather_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgather_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgather_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t 
vl, uint8_t ta) { + return vrgather_vx_u16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrgather_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrgather_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgather_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgather_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgather_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgather_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, 
vuint32m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgather_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgather_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgather_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgather_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgather_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgather_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgather_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, 
vuint64m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgather_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgather_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgather_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgather_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgather_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_u64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgather_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_u64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t 
maskedoff, vfloat16mf4_t op1, vuint16mf4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgather_vx_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16mf4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgather_vx_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgather_vx_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgather_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat16m2_t test_vrgather_vx_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgather_vx_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgather_vx_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f16m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint32mf2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgather_vx_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f32mf2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgather_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgather_vx_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f32m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgather_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgather_vx_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f32m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgather_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgather_vx_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f32m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgather_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
[[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgather_vx_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f32m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgather_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgather_vx_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f64m1_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgather_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgather_vx_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f64m2_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgather_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgather_vx_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f64m4_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vv.mask.nxv8f64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgather_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl, uint8_t ta) { + return vrgather_vv_f64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgather_vx_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[INDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgather_vx_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl, uint8_t ta) { + return vrgather_vx_f64m8_mt(mask, maskedoff, op1, index, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrgatherei16_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vrgatherei16_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrgatherei16_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrgatherei16_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrgatherei16_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrgatherei16_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrgatherei16_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrgatherei16_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrgatherei16_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrgatherei16_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrgatherei16_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrgatherei16_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL:
@test_vrgatherei16_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrgatherei16_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrgatherei16_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrgatherei16_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrgatherei16_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrgatherei16_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrgatherei16_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrgatherei16_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return 
vrgatherei16_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrgatherei16_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrgatherei16_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrgatherei16_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrgatherei16_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrgatherei16_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrgatherei16_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrgatherei16_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, 
vuint8m2_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrgatherei16_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrgatherei16_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrgatherei16_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrgatherei16_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrgatherei16_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrgatherei16_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m8_t test_vrgatherei16_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrgatherei16_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrgatherei16_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrgatherei16_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vrgatherei16_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vrgatherei16_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vrgatherei16_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vrgatherei16_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vrgatherei16_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vrgatherei16_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mt (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mt (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vrgatherei16_vv_f16m1_mt (vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vrgatherei16_vv_f16m2_mt (vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vrgatherei16_vv_f16m4_mt (vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vrgatherei16_vv_f16m8_mt (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mt (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vrgatherei16_vv_f32m1_mt (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vrgatherei16_vv_f32m2_mt (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vrgatherei16_vv_f32m4_mt (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vrgatherei16_vv_f32m8_mt (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return 
vrgatherei16_vv_f32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vrgatherei16_vv_f64m1_mt (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vrgatherei16_vv_f64m2_mt (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vrgatherei16_vv_f64m4_mt (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrgatherei16_vv_f64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrgatherei16.vv.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vrgatherei16_vv_f64m8_mt (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vrgatherei16_vv_f64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} +
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
index 73cadfd..678c4dc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrsub.c
@@ -396,402 +396,799 @@ vuint64m4_t test_vrsub_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vrsub_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vrsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vrsub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return
vrsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vrsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vrsub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vrsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vrsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vrsub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vrsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vrsub_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vrsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vrsub_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vrsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vrsub_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return 
vrsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vrsub_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vrsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vrsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vrsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vrsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vrsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vrsub_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vrsub_vx_i16m2_m (vbool8_t mask, vint16m2_t 
maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vrsub_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vrsub_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vrsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vrsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vrsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vrsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vrsub_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vrsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t 
vl) { +vint32m2_t test_vrsub_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vrsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vrsub_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vrsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vrsub_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vrsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vrsub_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vrsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vrsub_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vrsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrsub_vx_i64m4_m(vbool16_t mask, 
vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vrsub_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vrsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vrsub_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vrsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vrsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint8mf8_t test_vrsub_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vrsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint8mf4_t test_vrsub_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vrsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint8mf2_t test_vrsub_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint8m1_t test_vrsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint8m1_t test_vrsub_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vrsub_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vrsub_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vrsub_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vrsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vrsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint16mf4_t test_vrsub_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vrsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint16mf2_t test_vrsub_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vrsub_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vrsub_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vrsub_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vrsub_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vrsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vrsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint32mf2_t test_vrsub_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vrsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vrsub_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vrsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vrsub_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vrsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vrsub_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vrsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vrsub_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vrsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vrsub_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vrsub_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vrsub_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vrsub_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vrsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vrsub_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t 
test_vrsub_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vrsub_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vrsub_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vrsub_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vrsub_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vrsub_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vrsub_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vrsub_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16mf2_mt(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vrsub_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vrsub_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vrsub_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vrsub_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vrsub_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vrsub_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vrsub_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m4_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vrsub_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vrsub_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vrsub_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vrsub_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vrsub_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vrsub_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vrsub_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vrsub_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vrsub_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vrsub_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vrsub_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vrsub_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vrsub_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vrsub_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vrsub_vx_u16mf2_mt 
(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vrsub_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vrsub_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vrsub_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vrsub_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vrsub_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vrsub_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vrsub_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vrsub_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vrsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vrsub_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, 
uint8_t ta) {
+  return vrsub_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrsub_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrsub_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrsub_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrsub_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrsub_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vrsub_vx_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrsub_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vrsub_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
index 5b843e1..dcbc185 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsadd.c
@@ -795,910 +795,1591 @@ vuint64m8_t test_vsaddu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:
ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vsadd_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vsadd_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t 
test_vsadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vsadd_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vsadd_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16mf4_t test_vsadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vsadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vsadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vsadd_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vsadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vsadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, 
vint64m2_t op2, size_t vl) {
   return vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-                                 vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vsadd_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                 vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vsadd_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                 vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vsadd_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                 vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vsadd_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                 vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vsadd_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
   return vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vsaddu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                   vuint8mf8_t op1, vuint8mf8_t op2,
-                                   size_t vl) {
+vuint8mf8_t test_vsaddu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsaddu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vsaddu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                   vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vsaddu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vsaddu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                   vuint8mf4_t op1, vuint8mf4_t op2,
-                                   size_t vl) {
+vuint8mf4_t test_vsaddu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsaddu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vsaddu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                   vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vsaddu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vsaddu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                   vuint8mf2_t op1, vuint8mf2_t op2,
-                                   size_t vl) {
+vuint8mf2_t test_vsaddu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsaddu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vsaddu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                   vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vsaddu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                 vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vsaddu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                 vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vsaddu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                 vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vsaddu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint8m2_t test_vsaddu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t op2, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { return vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, uint8_t op2, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { return vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint16mf4_t op2, - size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vsaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint16_t op2, - size_t vl) { +vuint16mf4_t test_vsaddu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vsaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t op2, - size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vsaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint16_t op2, - size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vsaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t op2, - size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { return vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t op2, - size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t op2, - size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, 
uint16_t op2, size_t vl) { return vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t op2, - size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { return vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, uint16_t op2, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { return vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t op2, - size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vsaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, uint32_t op2, - size_t vl) { +vuint32mf2_t test_vsaddu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vsaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t op2, - size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t op2, - size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { return vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, uint32_t op2, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { return vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t op2, - size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { return vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, uint32_t op2, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { return vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t op2, - size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { return vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, uint32_t op2, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { return vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t op2, - size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { return vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, uint64_t op2, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t op2, - size_t vl) { +vuint64m2_t test_vsaddu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { return vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, uint64_t op2, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t op2, - size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { return vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, uint64_t op2, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t op2, - size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { return 
vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { return vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsadd_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsadd_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsadd_vx_i8mf2_mt 
(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsadd_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsadd_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsadd_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vsadd_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsadd_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsadd_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsadd_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsadd_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsadd_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsadd_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsadd_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsadd_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsadd_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsadd_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsadd_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t 
test_vsadd_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vx_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsadd_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsadd_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsadd_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vv_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vsadd_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsadd_vx_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsadd_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta)
{ + return vsadd_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vsadd_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsadd_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsadd_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vsadd_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsaddu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsaddu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vsaddu_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsaddu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsaddu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsaddu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsaddu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsaddu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsaddu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsaddu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsaddu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsaddu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsaddu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsaddu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsaddu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsaddu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsaddu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsaddu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsaddu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsaddu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vsaddu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsaddu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsaddu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t 
test_vsaddu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+  return vsaddu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsaddu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vsaddu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsaddu_vv_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+  return vsaddu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsaddu_vx_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsaddu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+  return vsaddu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
index 23ad316..0e80bb1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c
@@ -2,7 +2,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
 // RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -446,6 +447,66 @@ void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) {
   return vse64_v_u64m8(base, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16_v_f16mf4(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16_v_f16mf2(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16_v_f16m1(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16_v_f16m2(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16_v_f16m4(base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16_v_f16m8(base, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -976,6 +1037,66 @@ void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size
   return vse64_v_u64m8_m(mask, base, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
+  return vse16_v_f16mf4_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
+  return vse16_v_f16mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m1_m(vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
+  return vse16_v_f16m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m2_m(vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
+  return vse16_v_f16m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m4_m(vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
+  return vse16_v_f16m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vse16_v_f16m8_m(vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
+  return vse16_v_f16m8_m(mask, base, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -1135,123 +1256,3 @@ void test_vse1_v_b32(uint8_t *base, vbool32_t value, size_t vl) {
 void test_vse1_v_b64(uint8_t *base, vbool64_t value, size_t vl) {
   return vse1_v_b64(base, value, vl);
 }
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16mf4(_Float16 *base, vfloat16mf4_t value, size_t vl) {
-  return vse16_v_f16mf4(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16mf2(_Float16 *base, vfloat16mf2_t value, size_t vl) {
-  return vse16_v_f16mf2(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m1(_Float16 *base, vfloat16m1_t value, size_t vl) {
-  return vse16_v_f16m1(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m2(_Float16 *base, vfloat16m2_t value, size_t vl) {
-  return vse16_v_f16m2(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m4(_Float16 *base, vfloat16m4_t value, size_t vl) {
-  return vse16_v_f16m4(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m8(_Float16 *base, vfloat16m8_t value, size_t vl) {
-  return vse16_v_f16m8(base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv1f16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t value, size_t vl) {
-  return vse16_v_f16mf4_m (mask, base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16mf2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv2f16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t value, size_t vl) {
-  return vse16_v_f16mf2_m (mask, base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv4f16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t value, size_t vl) {
-  return vse16_v_f16m1_m (mask, base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv8f16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t value, size_t vl) {
-  return vse16_v_f16m2_m (mask, base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv16f16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t value, size_t vl) {
-  return vse16_v_f16m4_m (mask, base, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vse16_v_f16m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vse.mask.nxv32f16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vse16_v_f16m8_m (vbool2_t mask, _Float16 *base, vfloat16m8_t value, size_t vl) {
-  return vse16_v_f16m8_m (mask, base, value, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
index 30ef198..cd62f43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvl.c
@@ -1,270 +1,205 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -emit-llvm -o - %s \
-// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg \
+// RUN:   | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vsetvl_e8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 5)
+// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
+//
+size_t
test_vsetvl_e8mf8 (size_t avl) { + return vsetvl_e8mf8(avl); +} + +// CHECK-RV64-LABEL: @test_vsetvl_e8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 6) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +size_t test_vsetvl_e8mf4 (size_t avl) { + return vsetvl_e8mf4(avl); +} + +// CHECK-RV64-LABEL: @test_vsetvl_e8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 7) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +size_t test_vsetvl_e8mf2 (size_t avl) { + return vsetvl_e8mf2(avl); +} + // CHECK-RV64-LABEL: @test_vsetvl_e8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 0) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 0) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8m1(size_t avl) { +size_t test_vsetvl_e8m1 (size_t avl) { return vsetvl_e8m1(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 1) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 1) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8m2(size_t avl) { +size_t test_vsetvl_e8m2 (size_t avl) { return vsetvl_e8m2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 2) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 2) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8m4(size_t avl) { +size_t test_vsetvl_e8m4 (size_t avl) { return vsetvl_e8m4(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 3) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 0, i64 3) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8m8(size_t avl) { +size_t test_vsetvl_e8m8 (size_t avl) { return vsetvl_e8m8(avl); } -// CHECK-RV64-LABEL: @test_vsetvl_e8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 7) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] -// -size_t test_vsetvl_e8mf2(size_t avl) { - return vsetvl_e8mf2(avl); -} - -// CHECK-RV64-LABEL: @test_vsetvl_e8mf4( +// CHECK-RV64-LABEL: @test_vsetvl_e16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 6) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 6) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8mf4(size_t avl) { - return vsetvl_e8mf4(avl); +size_t test_vsetvl_e16mf4 (size_t avl) { + return vsetvl_e16mf4(avl); } -// CHECK-RV64-LABEL: @test_vsetvl_e8mf8( +// CHECK-RV64-LABEL: @test_vsetvl_e16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 0, i64 5) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 7) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e8mf8(size_t avl) { - return vsetvl_e8mf8(avl); +size_t test_vsetvl_e16mf2 (size_t avl) { + return vsetvl_e16mf2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 0) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 0) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e16m1(size_t avl) { +size_t test_vsetvl_e16m1 (size_t avl) { return vsetvl_e16m1(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 1) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 1) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e16m2(size_t avl) { +size_t test_vsetvl_e16m2 (size_t avl) { return vsetvl_e16m2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 2) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 2) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e16m4(size_t 
avl) { +size_t test_vsetvl_e16m4 (size_t avl) { return vsetvl_e16m4(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 3) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 1, i64 3) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e16m8(size_t avl) { +size_t test_vsetvl_e16m8 (size_t avl) { return vsetvl_e16m8(avl); } -// CHECK-RV64-LABEL: @test_vsetvl_e16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 7) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] -// -size_t test_vsetvl_e16mf2(size_t avl) { - return vsetvl_e16mf2(avl); -} - -// CHECK-RV64-LABEL: @test_vsetvl_e16mf4( +// CHECK-RV64-LABEL: @test_vsetvl_e32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 1, i64 6) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 2, i64 7) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e16mf4(size_t avl) { - return vsetvl_e16mf4(avl); +size_t test_vsetvl_e32mf2 (size_t avl) { + return vsetvl_e32mf2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 0) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 2, i64 0) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e32m1(size_t avl) { +size_t test_vsetvl_e32m1 (size_t avl) { return vsetvl_e32m1(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 1) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 2, i64 1) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e32m2(size_t avl) { +size_t test_vsetvl_e32m2 (size_t avl) { return vsetvl_e32m2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 2) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 2, i64 2) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e32m4(size_t avl) { +size_t test_vsetvl_e32m4 (size_t avl) { return vsetvl_e32m4(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 3) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 2, i64 3) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e32m8(size_t avl) { +size_t test_vsetvl_e32m8 (size_t avl) { return vsetvl_e32m8(avl); } -// CHECK-RV64-LABEL: @test_vsetvl_e32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 2, i64 7) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] -// -size_t test_vsetvl_e32mf2(size_t avl) { - return vsetvl_e32mf2(avl); -} - // CHECK-RV64-LABEL: @test_vsetvl_e64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 0) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 3, i64 0) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e64m1(size_t avl) { +size_t test_vsetvl_e64m1 (size_t avl) { return vsetvl_e64m1(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 1) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 3, i64 1) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e64m2(size_t avl) { +size_t test_vsetvl_e64m2 (size_t avl) { return vsetvl_e64m2(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[AVL_ADDR:%.*]] = alloca i64, align 8 -// CHECK-RV64-NEXT: store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8 -// CHECK-RV64-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 2) -// CHECK-RV64-NEXT: ret i64 [[TMP1]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 3, i64 2) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -size_t test_vsetvl_e64m4(size_t avl) { +size_t test_vsetvl_e64m4 (size_t avl) { return vsetvl_e64m4(avl); } // CHECK-RV64-LABEL: @test_vsetvl_e64m8( // 
CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[AVL_ADDR:%.*]] = alloca i64, align 8
-// CHECK-RV64-NEXT:    store i64 [[AVL:%.*]], i64* [[AVL_ADDR]], align 8
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = load i64, i64* [[AVL_ADDR]], align 8
-// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[TMP0]], i64 3, i64 3)
-// CHECK-RV64-NEXT:    ret i64 [[TMP1]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 [[AVL:%.*]], i64 3, i64 3)
+// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvl_e64m8(size_t avl) {
+size_t test_vsetvl_e64m8 (size_t avl) {
   return vsetvl_e64m8(avl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
index 0aef38c..56a58ab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsetvlmax.c
@@ -5,12 +5,39 @@
 
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 5)
+// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf8 () {
+  return vsetvlmax_e8mf8();
+}
+
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 6)
+// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf4 () {
+  return vsetvlmax_e8mf4();
+}
+
+// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 7)
+// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
+//
+size_t test_vsetvlmax_e8mf2 () {
+  return vsetvlmax_e8mf2();
+}
+
 // CHECK-RV64-LABEL: @test_vsetvlmax_e8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 0)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e8m1() {
+size_t test_vsetvlmax_e8m1 () {
   return vsetvlmax_e8m1();
 }
 
@@ -19,7 +46,7 @@ size_t test_vsetvlmax_e8m1() {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 1)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e8m2() {
+size_t test_vsetvlmax_e8m2 () {
   return vsetvlmax_e8m2();
 }
 
@@ -37,35 +64,26 @@ size_t test_vsetvlmax_e8m4() {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 3)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e8m8() {
+size_t test_vsetvlmax_e8m8 () {
   return vsetvlmax_e8m8();
 }
 
-// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 7)
-// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
-//
-size_t test_vsetvlmax_e8mf2() {
-  return vsetvlmax_e8mf2();
-}
-
-// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf4(
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 6)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 6)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e8mf4() {
-  return vsetvlmax_e8mf4();
+size_t test_vsetvlmax_e16mf4 () {
+  return vsetvlmax_e16mf4();
 }
 
-// CHECK-RV64-LABEL: @test_vsetvlmax_e8mf8(
+// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 0, i64 5)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 7)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e8mf8() {
-  return vsetvlmax_e8mf8();
+size_t test_vsetvlmax_e16mf2 () {
+  return vsetvlmax_e16mf2();
 }
 
 // CHECK-RV64-LABEL: @test_vsetvlmax_e16m1(
@@ -100,26 +118,17 @@ size_t test_vsetvlmax_e16m4() {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 3)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e16m8() {
+size_t test_vsetvlmax_e16m8 () {
   return vsetvlmax_e16m8();
 }
 
-// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 7)
-// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
-//
-size_t test_vsetvlmax_e16mf2() {
-  return vsetvlmax_e16mf2();
-}
-
-// CHECK-RV64-LABEL: @test_vsetvlmax_e16mf4(
+// CHECK-RV64-LABEL: @test_vsetvlmax_e32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 6)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 7)
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-size_t test_vsetvlmax_e16mf4() {
-  return vsetvlmax_e16mf4();
+size_t test_vsetvlmax_e32mf2 () {
+  return vsetvlmax_e32mf2();
 }
 
 // CHECK-RV64-LABEL: @test_vsetvlmax_e32m1(
@@ -158,15 +167,6 @@ size_t test_vsetvlmax_e32m8() {
   return vsetvlmax_e32m8();
 }
 
-// CHECK-RV64-LABEL: @test_vsetvlmax_e32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 2, i64 7)
-// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
-//
-size_t test_vsetvlmax_e32mf2() {
-  return vsetvlmax_e32mf2();
-}
-
 // CHECK-RV64-LABEL: @test_vsetvlmax_e64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 3, i64 0)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
index da26966..59f6e00 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
@@ -252,286 +252,511 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
+vint64m8_t test_vsext_vf2_i64m8 (vint32m4_t op1, size_t vl) {
   return vsext_vf2_i64m8(op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                    vint8mf8_t op1, size_t vl) {
+vint16mf4_t test_vsext_vf2_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl) {
   return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t
maskedoff, - vint8mf4_t op1, size_t vl) { +vint16mf2_t test_vsext_vf2_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl) { return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, size_t vl) { +vint16m1_t test_vsext_vf2_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl) { return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, size_t vl) { +vint16m2_t test_vsext_vf2_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl) { return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, size_t vl) { +vint16m4_t test_vsext_vf2_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl) { return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, size_t vl) { +vint16m8_t test_vsext_vf2_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl) { return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint8mf8_t op1, size_t vl) { +vint32mf2_t test_vsext_vf4_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl); } // 
CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint8mf4_t op1, size_t vl) { +vint32m1_t test_vsext_vf4_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint8mf2_t op1, size_t vl) { +vint32m2_t test_vsext_vf4_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint8m1_t op1, size_t vl) { +vint32m4_t test_vsext_vf4_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint8m2_t op1, size_t vl) { +vint32m8_t test_vsext_vf4_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint8mf8_t op1, size_t vl) { +vint64m1_t test_vsext_vf8_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint8mf4_t op1, size_t vl) { +vint64m2_t test_vsext_vf8_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint8mf2_t op1, size_t vl) { +vint64m4_t test_vsext_vf8_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint8m1_t op1, size_t vl) { +vint64m8_t test_vsext_vf8_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, size_t vl) { +vint32mf2_t test_vsext_vf2_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl) { return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, size_t vl) { +vint32m1_t test_vsext_vf2_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl) { return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, size_t vl) { +vint32m2_t test_vsext_vf2_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl) { return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, size_t vl) { +vint32m4_t test_vsext_vf2_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl) { return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, size_t vl) { +vint32m8_t test_vsext_vf2_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl) { return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint16mf4_t op1, size_t vl) { +vint64m1_t test_vsext_vf4_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint16mf2_t op1, size_t vl) { +vint64m2_t test_vsext_vf4_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint16m1_t op1, size_t vl) { +vint64m4_t test_vsext_vf4_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, 
vint16m1_t op1, size_t vl) { return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint16m2_t op1, size_t vl) { +vint64m8_t test_vsext_vf4_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, size_t vl) { +vint64m1_t test_vsext_vf2_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl) { return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, size_t vl) { +vint64m2_t test_vsext_vf2_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl) { return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, size_t vl) { +vint64m4_t test_vsext_vf2_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl) { return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, size_t vl) { +vint64m8_t test_vsext_vf2_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl) { return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf4_i32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf4_i32m1_mt(mask, maskedoff, 
op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl, uint8_t ta) { + return vsext_vf8_i64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl, uint8_t ta) { + return vsext_vf8_i64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl, uint8_t ta) { + return vsext_vf8_i64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl, uint8_t ta) { + return vsext_vf8_i64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vsext_vf2_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, size_t vl, uint8_t ta) { + return vsext_vf2_i32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf2_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, size_t vl, uint8_t ta) { + return vsext_vf2_i32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf2_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, size_t vl, uint8_t ta) { + return vsext_vf2_i32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf2_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, size_t vl, uint8_t ta) { + return vsext_vf2_i32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf2_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, size_t vl, uint8_t ta) { + return vsext_vf2_i32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl, uint8_t ta) { + return vsext_vf4_i64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf4_i64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, size_t vl, uint8_t ta) {
+ return vsext_vf2_i64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
index 8171926..d4b7eab 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1down.c
@@ -416,490 +416,799 @@ vuint64m4_t test_vslide1down_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value,
- size_t vl) {
+vuint64m8_t test_vslide1down_vx_u64m8 (vuint64m8_t src, uint64_t value, size_t vl) {
 return vslide1down_vx_u64m8(src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t
test_vslide1down_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) { return vslide1down_vx_i8mf8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vslide1down_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t src, int8_t value, - size_t vl) { +vint8mf4_t test_vslide1down_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) { return vslide1down_vx_i8mf4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vslide1down_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t src, int8_t value, - size_t vl) { +vint8mf2_t test_vslide1down_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) { return vslide1down_vx_i8mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t src, int8_t value, size_t vl) { +vint8m1_t test_vslide1down_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { return vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t src, int8_t value, size_t vl) { +vint8m2_t test_vslide1down_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { return vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t src, int8_t value, size_t vl) { +vint8m4_t test_vslide1down_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { return vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t src, int8_t value, size_t vl) { +vint8m8_t test_vslide1down_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { return vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vslide1down_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t src, int16_t value, - size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) { return vslide1down_vx_i16mf4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vslide1down_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t src, int16_t value, - size_t vl) { +vint16mf2_t test_vslide1down_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) { return vslide1down_vx_i16mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t src, int16_t value, - size_t vl) { +vint16m1_t test_vslide1down_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { return vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t src, int16_t value, - size_t vl) { +vint16m2_t test_vslide1down_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { return vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t src, int16_t value, - size_t vl) { +vint16m4_t test_vslide1down_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { return vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t src, int16_t value, - size_t vl) { +vint16m8_t test_vslide1down_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { return vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vslide1down_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t src, int32_t value, - size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) { return vslide1down_vx_i32mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, 
vint32m1_t maskedoff, - vint32m1_t src, int32_t value, - size_t vl) { +vint32m1_t test_vslide1down_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { return vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t src, int32_t value, - size_t vl) { +vint32m2_t test_vslide1down_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { return vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t src, int32_t value, - size_t vl) { +vint32m4_t test_vslide1down_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { return vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t src, int32_t value, - size_t vl) { +vint32m8_t test_vslide1down_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { return vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t src, int64_t value, - size_t vl) { +vint64m1_t test_vslide1down_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { return vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 
[[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t src, int64_t value, - size_t vl) { +vint64m2_t test_vslide1down_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { return vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t src, int64_t value, - size_t vl) { +vint64m4_t test_vslide1down_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { return vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t src, int64_t value, - size_t vl) { +vint64m8_t test_vslide1down_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { return vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vslide1down_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, - vuint8mf8_t src, uint8_t value, - size_t vl) { +vuint8mf8_t test_vslide1down_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8mf8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vslide1down_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, - vuint8mf4_t src, uint8_t value, - size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, 
vuint8mf4_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8mf4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vslide1down_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t src, uint8_t value, - size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t src, uint8_t value, - size_t vl) { +vuint8m1_t test_vslide1down_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t src, uint8_t value, - size_t vl) { +vuint8m2_t test_vslide1down_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t src, uint8_t value, - size_t vl) { +vuint8m4_t test_vslide1down_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t src, uint8_t value, - size_t vl) { +vuint8m8_t test_vslide1down_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { return vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vslide1down_vx_u16mf4_m(vbool64_t mask, - vuint16mf4_t maskedoff, - vuint16mf4_t src, uint16_t value, - size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16mf4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vslide1down_vx_u16mf2_m(vbool32_t mask, - vuint16mf2_t maskedoff, - vuint16mf2_t src, uint16_t value, - size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t src, uint16_t value, - size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t src, uint16_t value, - size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: 
@test_vslide1down_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t src, uint16_t value, - size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t src, uint16_t value, - size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { return vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vslide1down_vx_u32mf2_m(vbool64_t mask, - vuint32mf2_t maskedoff, - vuint32mf2_t src, uint32_t value, - size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) { return vslide1down_vx_u32mf2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t src, uint32_t value, - size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { return vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t src, uint32_t value, - size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { return vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t src, uint32_t value, - size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { return vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t src, uint32_t value, - size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { return vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t src, uint64_t value, - size_t vl) { +vuint64m1_t test_vslide1down_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { return vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t src, uint64_t value, - size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { return vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t src, uint64_t value,
- size_t vl) {
+vuint64m4_t test_vslide1down_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
 return vslide1down_vx_u64m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t src, uint64_t value,
- size_t vl) {
+vuint64m8_t test_vslide1down_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
 return vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl);
}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vslide1down_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1down_vx_i8mf8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vslide1down_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1down_vx_i8mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vslide1down_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1down_vx_i8mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vslide1down_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1down_vx_i8m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// 
CHECK-RV64-LABEL: @test_vslide1down_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vslide1down_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i8m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vslide1down_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i8m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vslide1down_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i8m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vslide1down_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vslide1down_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vslide1down_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i16m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vslide1down_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl, uint8_t ta) { + return 
vslide1down_vx_i16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vslide1down_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vslide1down_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vslide1down_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vslide1down_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vslide1down_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i32m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vslide1down_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vslide1down_vx_i32m8_mt (vbool4_t 
mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vslide1down_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vslide1down_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vslide1down_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vslide1down_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_i64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vslide1down_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8mf8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vslide1down_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vslide1down_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vslide1down_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vslide1down_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vslide1down_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vslide1down_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u8m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vslide1down_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vslide1down_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i16.i16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslide1down_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslide1down_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslide1down_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslide1down_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1down_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslide1down_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslide1down_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u32m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vslide1down_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vslide1down_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vslide1down_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vslide1down_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vslide1down_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vslide1down_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1down_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vslide1down_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl, uint8_t ta) { + return vslide1down_vx_u64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c index 0c154d5..e976852 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c @@ -413,468 +413,799 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value, // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
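For reference, the new _mt builtins exercised above take the tail policy as a trailing argument; VE_TAIL_AGNOSTIC is the value used throughout these tests and lowers to the i64 1 operand in the CHECK lines. A minimal caller sketch, assuming a tail-undisturbed counterpart constant also exists as described for the two policies:

#include <riscv_vector.h>

// Masked slide with an explicit tail-agnostic policy: tail elements may
// hold any value after the operation. (Illustrative only; the function
// name shift_in_agnostic is not part of the patch.)
vint32m1_t shift_in_agnostic(vbool32_t mask, vint32m1_t maskedoff,
                             vint32m1_t src, int32_t value, size_t vl) {
  return vslide1down_vx_i32m1_mt(mask, maskedoff, src, value, vl,
                                 VE_TAIL_AGNOSTIC);
}
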
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
index 0c154d5..e976852 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslide1up.c
@@ -413,468 +413,799 @@ vuint64m4_t test_vslide1up_vx_u64m4(vuint64m4_t src, uint64_t value,
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.nxv8i64.i64.i64( [[SRC:%.*]], i64 [[VALUE:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value,
- size_t vl) {
+vuint64m8_t test_vslide1up_vx_u64m8 (vuint64m8_t src, uint64_t value, size_t vl) {
  return vslide1up_vx_u64m8(src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf8_t test_vslide1up_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
- vint8mf8_t src, int8_t value, size_t vl) {
+vint8mf8_t test_vslide1up_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8mf8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf4_t test_vslide1up_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
- vint8mf4_t src, int8_t value, size_t vl) {
+vint8mf4_t test_vslide1up_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8mf4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vslide1up_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
- vint8mf2_t src, int8_t value, size_t vl) {
+vint8mf2_t test_vslide1up_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
- vint8m1_t src, int8_t value, size_t vl) {
+vint8m1_t test_vslide1up_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
- vint8m2_t src, int8_t value, size_t vl) {
+vint8m2_t test_vslide1up_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
- vint8m4_t src, int8_t value, size_t vl) {
+vint8m4_t test_vslide1up_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
- vint8m8_t src, int8_t value, size_t vl) {
+vint8m8_t test_vslide1up_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) {
  return vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vslide1up_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
- vint16mf4_t src, int16_t value,
- size_t vl) {
+vint16mf4_t test_vslide1up_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16mf4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vslide1up_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
- vint16mf2_t src, int16_t value,
- size_t vl) {
+vint16mf2_t test_vslide1up_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
- vint16m1_t src, int16_t value, size_t vl) {
+vint16m1_t test_vslide1up_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
- vint16m2_t src, int16_t value, size_t vl) {
+vint16m2_t test_vslide1up_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
- vint16m4_t src, int16_t value, size_t vl) {
+vint16m4_t test_vslide1up_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
- vint16m8_t src, int16_t value, size_t vl) {
+vint16m8_t test_vslide1up_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) {
  return vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vslide1up_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint32mf2_t src, int32_t value,
- size_t vl) {
+vint32mf2_t test_vslide1up_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl) {
  return vslide1up_vx_i32mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint32m1_t src, int32_t value, size_t vl) {
+vint32m1_t test_vslide1up_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) {
  return vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint32m2_t src, int32_t value, size_t vl) {
+vint32m2_t test_vslide1up_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) {
  return vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint32m4_t src, int32_t value, size_t vl) {
+vint32m4_t test_vslide1up_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) {
  return vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
- vint32m8_t src, int32_t value, size_t vl) {
+vint32m8_t test_vslide1up_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) {
  return vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
- vint64m1_t src, int64_t value, size_t vl) {
+vint64m1_t test_vslide1up_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) {
  return vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vint64m2_t src, int64_t value, size_t vl) {
+vint64m2_t test_vslide1up_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) {
  return vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
- vint64m4_t src, int64_t value, size_t vl) {
+vint64m4_t test_vslide1up_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) {
  return vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
- vint64m8_t src, int64_t value, size_t vl) {
+vint64m8_t test_vslide1up_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) {
  return vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vslide1up_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
- vuint8mf8_t src, uint8_t value,
- size_t vl) {
+vuint8mf8_t test_vslide1up_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8mf8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vslide1up_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
- vuint8mf4_t src, uint8_t value,
- size_t vl) {
+vuint8mf4_t test_vslide1up_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8mf4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vslide1up_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
- vuint8mf2_t src, uint8_t value,
- size_t vl) {
+vuint8mf2_t test_vslide1up_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
- vuint8m1_t src, uint8_t value, size_t vl) {
+vuint8m1_t test_vslide1up_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
- vuint8m2_t src, uint8_t value, size_t vl) {
+vuint8m2_t test_vslide1up_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
- vuint8m4_t src, uint8_t value, size_t vl) {
+vuint8m4_t test_vslide1up_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
- vuint8m8_t src, uint8_t value, size_t vl) {
+vuint8m8_t test_vslide1up_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) {
  return vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vslide1up_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
- vuint16mf4_t src, uint16_t value,
- size_t vl) {
+vuint16mf4_t test_vslide1up_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16mf4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vslide1up_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
- vuint16mf2_t src, uint16_t value,
- size_t vl) {
+vuint16mf2_t test_vslide1up_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint16m1_t src, uint16_t value,
- size_t vl) {
+vuint16m1_t test_vslide1up_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint16m2_t src, uint16_t value,
- size_t vl) {
+vuint16m2_t test_vslide1up_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint16m4_t src, uint16_t value,
- size_t vl) {
+vuint16m4_t test_vslide1up_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint16m8_t src, uint16_t value,
- size_t vl) {
+vuint16m8_t test_vslide1up_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) {
  return vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vslide1up_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint32mf2_t src, uint32_t value,
- size_t vl) {
+vuint32mf2_t test_vslide1up_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl) {
  return vslide1up_vx_u32mf2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint32m1_t src, uint32_t value,
- size_t vl) {
+vuint32m1_t test_vslide1up_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) {
  return vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint32m2_t src, uint32_t value,
- size_t vl) {
+vuint32m2_t test_vslide1up_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) {
  return vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint32m4_t src, uint32_t value,
- size_t vl) {
+vuint32m4_t test_vslide1up_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) {
  return vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint32m8_t src, uint32_t value,
- size_t vl) {
+vuint32m8_t test_vslide1up_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) {
  return vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint64m1_t src, uint64_t value,
- size_t vl) {
+vuint64m1_t test_vslide1up_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) {
  return vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl);
}
// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint64m2_t src, uint64_t value,
- size_t vl) {
+vuint64m2_t test_vslide1up_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) {
  return vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t src, uint64_t value,
- size_t vl) {
+vuint64m4_t test_vslide1up_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) {
  return vslide1up_vx_u64m4_m(mask, maskedoff, src, value, vl);
}

// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t src, uint64_t value,
- size_t vl) {
+vuint64m8_t test_vslide1up_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) {
  return vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl);
}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vslide1up_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8mf8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vslide1up_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vslide1up_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vslide1up_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vslide1up_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vslide1up_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vslide1up_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i8m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vslide1up_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vslide1up_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vslide1up_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vslide1up_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vslide1up_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vslide1up_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vslide1up_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, int32_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vslide1up_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vslide1up_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i32m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vslide1up_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vslide1up_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vslide1up_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vslide1up_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vslide1up_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vslide1up_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_i64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vslide1up_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8mf8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vslide1up_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vslide1up_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vslide1up_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vslide1up_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vslide1up_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vslide1up_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u8m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vslide1up_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, uint16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u16mf4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vslide1up_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, uint16_t value, size_t vl, uint8_t ta) {
+ return vslide1up_vx_u16mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m1_mt(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vslide1up_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u16m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vslide1up_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u16m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vslide1up_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u16m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vslide1up_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u16m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vslide1up_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u32mf2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vslide1up_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u32m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vslide1up_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl, uint8_t ta) { + return vslide1up_vx_u32m2_mt(mask, maskedoff, src, value, vl, 
VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vslide1up_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u32m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vslide1up_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u32m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vslide1up_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u64m1_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vslide1up_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u64m2_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vslide1up_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u64m4_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vslide1up_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VALUE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vslide1up_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl, uint8_t ta) {
+  return vslide1up_vx_u64m8_mt(mask, maskedoff, src, value, vl, VE_TAIL_AGNOSTIC);
+}
+
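The vsll.c hunk below covers the other side of the change: the plain `_m` builtins keep their existing five-argument signature, and the frontend now appends the `i64 1` (tail agnostic) policy operand itself, as every updated CHECK line shows. A sketch of the intended equivalence, assuming the corresponding `_mt` builtin that this patch adds for masked operations (the vsll `_mt` name is inferred from the naming scheme and not shown in this hunk):

#include <riscv_vector.h>

// Plain masked form: no policy parameter at the C level; the trailing
// `i64 1` in the updated CHECK lines is supplied by the compiler, so the
// tail of the result is agnostic.
vint8m1_t sll_masked(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
                     size_t shift, size_t vl) {
  return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
}

// Assumed explicit form: passing VE_TAIL_AGNOSTIC (= 1, matching the i64 1
// operand above) should produce the same IR as the _m builtin.
vint8m1_t sll_masked_explicit(vbool8_t mask, vint8m1_t maskedoff,
                              vint8m1_t op1, size_t shift, size_t vl) {
  return vsll_vx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
}
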
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
index ae4465a..cc8f320 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsll.c
@@ -792,798 +792,1591 @@ vuint64m8_t test_vsll_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vsll_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+vuint64m8_t test_vsll_vx_u64m8 (vuint64m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m8(op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vsll_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+vint8mf8_t test_vsll_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vsll_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vsll_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
+vint8mf8_t test_vsll_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vsll_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+vint8mf4_t test_vsll_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vsll_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vsll_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
+vint8mf4_t test_vsll_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
   return vsll_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64
[[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { +vint8mf2_t test_vsll_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { return vsll_vv_i8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsll_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { +vint8mf2_t test_vsll_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) { return vsll_vx_i8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { +vint8m1_t test_vsll_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) { return vsll_vv_i8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsll_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { +vint8m1_t test_vsll_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) { return vsll_vx_i8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { +vint8m2_t test_vsll_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) { return vsll_vv_i8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsll_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { +vint8m2_t test_vsll_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) { return vsll_vx_i8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { +vint8m4_t test_vsll_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) { return vsll_vv_i8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsll_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { +vint8m4_t test_vsll_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) { return vsll_vx_i8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { +vint8m8_t test_vsll_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) { return vsll_vv_i8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsll_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { +vint8m8_t test_vsll_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) { return vsll_vx_i8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { +vint16mf4_t test_vsll_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) { return vsll_vv_i16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsll_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { +vint16mf4_t test_vsll_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) { return vsll_vx_i16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { +vint16mf2_t test_vsll_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) { return vsll_vv_i16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsll_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { +vint16mf2_t test_vsll_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) { return vsll_vx_i16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { +vint16m1_t test_vsll_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) { return vsll_vv_i16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsll_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { +vint16m1_t test_vsll_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) { return vsll_vx_i16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { +vint16m2_t test_vsll_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) { return vsll_vv_i16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsll_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { +vint16m2_t test_vsll_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) { return vsll_vx_i16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { +vint16m4_t test_vsll_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) { return vsll_vv_i16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsll_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { +vint16m4_t test_vsll_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) { return vsll_vx_i16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: 
@test_vsll_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { +vint16m8_t test_vsll_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) { return vsll_vv_i16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsll_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { +vint16m8_t test_vsll_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) { return vsll_vx_i16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { +vint32mf2_t test_vsll_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vsll_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsll_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { +vint32mf2_t test_vsll_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { return vsll_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { +vint32m1_t test_vsll_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, 
vint32m1_t op1, vuint32m1_t shift, size_t vl) { return vsll_vv_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsll_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { +vint32m1_t test_vsll_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { return vsll_vx_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { +vint32m2_t test_vsll_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { return vsll_vv_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsll_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { +vint32m2_t test_vsll_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { return vsll_vx_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { +vint32m4_t test_vsll_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { return vsll_vv_i32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsll_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t 
shift, size_t vl) { +vint32m4_t test_vsll_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { return vsll_vx_i32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { +vint32m8_t test_vsll_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { return vsll_vv_i32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsll_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { +vint32m8_t test_vsll_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { return vsll_vx_i32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { +vint64m1_t test_vsll_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { return vsll_vv_i64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsll_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { +vint64m1_t test_vsll_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { return vsll_vx_i64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vint64m2_t test_vsll_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { +vint64m2_t test_vsll_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { return vsll_vv_i64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsll_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { +vint64m2_t test_vsll_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { return vsll_vx_i64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { +vint64m4_t test_vsll_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { return vsll_vv_i64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsll_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { +vint64m4_t test_vsll_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) { return vsll_vx_i64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { +vint64m8_t test_vsll_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) { return vsll_vv_i64m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsll_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { +vint64m8_t test_vsll_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) { return vsll_vx_i64m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { +vuint8mf8_t test_vsll_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) { return vsll_vv_u8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vsll_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { +vuint8mf8_t test_vsll_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) { return vsll_vx_u8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { +vuint8mf4_t test_vsll_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) { return vsll_vv_u8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vsll_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { +vuint8mf4_t test_vsll_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) { return vsll_vx_u8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { +vuint8mf2_t test_vsll_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) { return vsll_vv_u8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vsll_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { +vuint8mf2_t test_vsll_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { return vsll_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { +vuint8m1_t test_vsll_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { return vsll_vv_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsll_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { +vuint8m1_t test_vsll_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { return vsll_vx_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { +vuint8m2_t test_vsll_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { return vsll_vv_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsll_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { +vuint8m2_t test_vsll_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) { return vsll_vx_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { +vuint8m4_t test_vsll_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { return vsll_vv_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsll_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { +vuint8m4_t test_vsll_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { return vsll_vx_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { +vuint8m8_t test_vsll_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { return vsll_vv_u8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsll_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { +vuint8m8_t test_vsll_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { return vsll_vx_u8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { +vuint16mf4_t test_vsll_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { return vsll_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vsll_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { +vuint16mf4_t test_vsll_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { return vsll_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { +vuint16mf2_t test_vsll_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { return vsll_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vsll_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { +vuint16mf2_t test_vsll_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { return vsll_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { +vuint16m1_t test_vsll_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t 
shift, size_t vl) { return vsll_vv_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsll_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { +vuint16m1_t test_vsll_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { return vsll_vx_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { +vuint16m2_t test_vsll_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { return vsll_vv_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsll_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { +vuint16m2_t test_vsll_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { return vsll_vx_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { +vuint16m4_t test_vsll_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { return vsll_vv_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsll_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t 
shift, size_t vl) { +vuint16m4_t test_vsll_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { return vsll_vx_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { +vuint16m8_t test_vsll_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { return vsll_vv_u16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsll_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { +vuint16m8_t test_vsll_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { return vsll_vx_u16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { +vuint32mf2_t test_vsll_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vsll_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vsll_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { +vuint32mf2_t test_vsll_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { return vsll_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { +vuint32m1_t test_vsll_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) { return vsll_vv_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsll_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { +vuint32m1_t test_vsll_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { return vsll_vx_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { +vuint32m2_t test_vsll_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { return vsll_vv_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsll_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { +vuint32m2_t test_vsll_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { return vsll_vx_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { +vuint32m4_t test_vsll_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { return vsll_vv_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsll_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vsll_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { return vsll_vx_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { +vuint32m8_t test_vsll_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { return vsll_vv_u32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsll_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { +vuint32m8_t test_vsll_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { return vsll_vx_u32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { +vuint64m1_t test_vsll_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { return vsll_vv_u64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsll_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { +vuint64m1_t test_vsll_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { return vsll_vx_u64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { +vuint64m2_t test_vsll_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { return vsll_vv_u64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsll_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { +vuint64m2_t test_vsll_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { return vsll_vx_u64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { +vuint64m4_t test_vsll_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { return vsll_vv_u64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsll_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { +vuint64m4_t test_vsll_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { return vsll_vx_u64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsll_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsll_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { +vuint64m8_t test_vsll_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { return vsll_vv_u64m8_m(mask, maskedoff, op1, 
shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vsll_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m8_t test_vsll_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+vuint64m8_t test_vsll_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
   return vsll_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vsll_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) {
+  return vsll_vv_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vsll_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsll_vx_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vsll_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) {
+  return vsll_vv_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vsll_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsll_vx_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsll_vv_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vsll_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) {
+  return vsll_vv_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsll_vx_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vsll_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t
maskedoff, vint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsll_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsll_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsll_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// 
CHECK-RV64-LABEL: @test_vsll_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsll_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsll_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsll_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsll_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsll_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsll_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsll_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsll_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsll_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsll_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsll_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsll_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsll_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsll_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsll_vx_i64m4_mt (vbool16_t mask, vint64m4_t 
maskedoff, vint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsll_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vsll_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vsll_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return 
vsll_vv_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vsll_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vsll_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vsll_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vsll_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u8m8_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vsll_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vsll_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vsll_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsll.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vsll_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vsll_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vsll_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vsll_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vsll_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vsll_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vsll_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vsll_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vsll_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsll_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsll_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vv_u64m4_mt (vbool16_t 
mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsll_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) { + return vsll_vv_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsll_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsll.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsll_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vsll_vx_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c index 33480db7..909d93e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul.c @@ -396,449 +396,799 @@ vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8 (vint64m8_t op1, int64_t op2, size_t vl) { return vsmul_vx_i64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vsmul_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vsmul_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsmul_vx_i8mf8_m(vbool64_t 
mask, vint8mf8_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vsmul_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vsmul_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vsmul_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsmul_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vsmul_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vsmul_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vsmul_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsmul_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vsmul_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vsmul_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vsmul_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vsmul_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vsmul_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vsmul_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vsmul_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vsmul_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vsmul_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vsmul_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vsmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vsmul_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vsmul_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vsmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vsmul_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vsmul_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vsmul_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vsmul_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vsmul_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vsmul_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vsmul_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vsmul_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vsmul_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, - size_t vl) { +vint32mf2_t test_vsmul_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vsmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vsmul_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vsmul_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vsmul_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vsmul_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vsmul_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vsmul_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vsmul_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vsmul_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vsmul_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vsmul_vx_i32m8_m(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vsmul_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vsmul_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vsmul_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vsmul_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint64m4_t op2, size_t vl) { +vint64m4_t test_vsmul_vv_i64m4_m (vbool16_t mask, vint64m4_t 
maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int64_t op2, size_t vl) { +vint64m4_t test_vsmul_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint64m8_t op2, size_t vl) { +vint64m8_t test_vsmul_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int64_t op2, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vsmul_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
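// A minimal usage sketch, separate from the generated tests above: the `_mt`
// builtins take the tail policy as the trailing argument, so a caller that
// needs the tail elements preserved (rather than the tail-agnostic behaviour
// these tests exercise) passes the undisturbed policy instead. The helper
// name `vsmul_keep_tail` is hypothetical, and VE_TAIL_UNDISTURBED is assumed
// to be the tail-undisturbed counterpart of VE_TAIL_AGNOSTIC provided with
// the matching intrinsic patch (1/2); the vector types come from
// <riscv_vector.h>, which the test file already includes.
vint8mf8_t vsmul_keep_tail(vbool64_t mask, vint8mf8_t maskedoff,
                           vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  // Tail elements of the result are taken from maskedoff instead of being
  // left in an unspecified (agnostic) state.
  return vsmul_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}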
+vint8mf4_t test_vsmul_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vsmul_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vsmul_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8m2_mt(mask, 
maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vsmul_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vsmul_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsmul_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32m2_t test_vsmul_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, 
uint8_t ta) { + return vsmul_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) { + return vsmul_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsmul_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) { + return vsmul_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c index 5b94253..9dbb126 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c @@ -1722,6 +1722,216 @@ void test_vsoxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex, return vsoxei64_v_u64m8(base, bindex, value, vl); } +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8_v_f16mf4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8_v_f16mf2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8_v_f16m1(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8_v_f16m2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8_v_f16m4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8_v_f16m8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16_v_f16mf4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16_v_f16mf2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei16_v_f16m1(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsoxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16_v_f16m2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16_v_f16m4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16_v_f16m8(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32_v_f16mf4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32_v_f16mf2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32_v_f16m1(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32_v_f16m2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void 
test_vsoxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32_v_f16m4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64_v_f16mf4(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64_v_f16mf2(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64_v_f16m1(base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64_v_f16m2(base, bindex, value, vl); +} + // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -3841,112 +4051,322 @@ void test_vsoxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex, return vsoxei64_v_u64m8_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, - vfloat32mf2_t value, size_t vl) { - return vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8_v_f16mf4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, - vfloat32m1_t value, size_t vl) { - return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8_v_f16mf2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, - vfloat32m2_t value, size_t vl) { - return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8_v_f16m1_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, - vfloat32m4_t value, size_t vl) { - return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8_v_f16m2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, - vfloat32m8_t value, size_t vl) { - return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8_v_f16m4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// 
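// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated test diff: the masked `_m`
// store forms take the mask first, and inactive lanes perform no memory
// access at all, which is why the masked CHECK lines gain a mask operand but
// no maskedoff. vmfne_vf_f16m1_b16 is assumed here as the mask-producing
// compare builtin.
#include <riscv_vector.h>
#include <stddef.h>

void scatter_nonzero_f16(_Float16 *base, vuint16m1_t bindex,
                         vfloat16m1_t value, size_t vl) {
  // Store only the lanes whose value is non-zero; memory addressed by the
  // masked-off lanes is left untouched.
  vbool16_t nz = vmfne_vf_f16m1_b16(value, (_Float16)0.0, vl);
  vsoxei16_v_f16m1_m(nz, base, bindex, value, vl);
}
// ---------------------------------------------------------------------------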
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, - vfloat32mf2_t value, size_t vl) { - return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsoxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8_v_f16m8_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, - vfloat32m1_t value, size_t vl) { - return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16_v_f16mf4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, - vfloat32m2_t value, size_t vl) { - return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16_v_f16mf2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, - vfloat32m4_t value, size_t vl) { - return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsoxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, 
vfloat16m1_t value, size_t vl) { + return vsoxei16_v_f16m1_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, +void test_vsoxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16_v_f16m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16_v_f16m8_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32_v_f16mf4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32_v_f16mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32_v_f16m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32_v_f16m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64_v_f16mf4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64_v_f16mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64_v_f16m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, + vfloat32mf2_t value, size_t vl) { + return vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, + vfloat32m1_t value, size_t vl) { + return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, + vfloat32m2_t value, size_t vl) { + return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, + vfloat32m4_t value, size_t vl) { + return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, + vfloat32m8_t value, size_t vl) { + return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, + vfloat32mf2_t value, size_t vl) { + return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, + vfloat32m1_t value, size_t vl) { + return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, + vfloat32m2_t value, size_t vl) { + return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, + vfloat32m4_t value, size_t vl) { + return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsoxei16_v_f32m8_m(mask, base, bindex, value, vl); } @@ -4221,427 +4641,7 @@ void test_vsoxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsoxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsoxei64_v_f64m8_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei8_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei8_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei8_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei8_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei8_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei8_v_f16m8 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei16_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei16_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei16_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei16_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei16_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei16_v_f16m8 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsoxei32_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei32_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei32_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei32_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei32_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei32_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei64_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei64_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsoxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei64_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei64_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei8_v_f16mf4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei8_v_f16mf2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei8_v_f16m1_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei8_v_f16m2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei8_v_f16m4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, 
vfloat16m8_t value, size_t vl) { - return vsoxei8_v_f16m8_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei16_v_f16mf4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei16_v_f16mf2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei16_v_f16m1_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei16_v_f16m2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei16_v_f16m4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsoxei16_v_f16m8_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, 
vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei32_v_f16mf4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei32_v_f16mf2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei32_v_f16m1_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei32_v_f16m2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsoxei32_v_f16m4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsoxei64_v_f16mf4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsoxei64_v_f16mf2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16m1_m 
(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsoxei64_v_f16m1_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsoxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsoxei64_v_f16m2_m (mask, base, bindex, value, vl); -} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c index c164b89..607cf9e 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxseg.c @@ -6091,9956 +6091,9957 @@ void test_vsoxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t return vsoxseg2ei64_v_u64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, 
vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl);
+void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]],
[[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( 
[[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); +void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsoxseg2ei64_v_f16m2(base, 
bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, 
vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei8_v_f32m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf8_m 
(vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei16_v_f32m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], 
[[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei16_v_f32m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei16_v_f32m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t 
*base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei32_v_f32m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei32_v_f32m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f32m2 
(float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei32_v_f32m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return 
vsoxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsoxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsoxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsoxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei64_v_f32m1 (float 
*base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsoxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsoxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsoxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg3ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsoxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsoxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsoxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t 
v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsoxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, 
vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f64m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f64m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f64m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f64m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t
v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t 
v4, size_t vl) { + return vsoxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
void // -void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) { + return vsoxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsoxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsoxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsoxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsoxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsoxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + 
return vsoxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, 
vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsoxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsoxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsoxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsoxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsoxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsoxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsoxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsoxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_i8m2_m 
(vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsoxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsoxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsoxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsoxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], 
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
@llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsoxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei16_v_i16mf2_m (vbool32_t mask, 
int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsoxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsoxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32mf2_m 
(vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsoxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, 
vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsoxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, 
vint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) 
{ - return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsoxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); 
} -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsoxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsoxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t 
bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsoxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsoxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsoxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsoxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, 
vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsoxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsoxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsoxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return 
vsoxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t 
v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_i64m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsoxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsoxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsoxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsoxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsoxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsoxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t 
bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsoxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsoxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsoxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsoxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i64m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg2ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsoxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, 
vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { - return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { - return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { - return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { - return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { - return vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { - return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { - return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, 
vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, 
size_t vl) { + return vsoxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsoxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsoxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsoxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsoxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, 
vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsoxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsoxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsoxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsoxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsoxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) 
{
+  return vsoxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[V0:%.*]], <vscale x 1 x i32> [[V1:%.*]], <vscale x 1 x i32> [[V2:%.*]], <vscale x 1 x i32> [[V3:%.*]], <vscale x 1 x i32> [[V4:%.*]], <vscale x 1 x i32> [[V5:%.*]], <vscale x 1 x i32> [[V6:%.*]], <vscale x 1 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[V0:%.*]], <vscale x 2 x i32> [[V1:%.*]], <vscale x 2 x i32> [[V2:%.*]], <vscale x 2 x i32> [[V3:%.*]], <vscale x 2 x i32> [[V4:%.*]], <vscale x 2 x i32> [[V5:%.*]], <vscale x 2 x i32> [[V6:%.*]], <vscale x 2 x i32> [[V7:%.*]], i32* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], <vscale x 4 x i8> [[V5:%.*]], <vscale x 4 x i8> [[V6:%.*]], <vscale x 4 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[V0:%.*]], <vscale x 4 x i32> [[V1:%.*]], <vscale x 4 x i32> [[V2:%.*]], <vscale x 4 x i32> [[V3:%.*]], i32* [[BASE:%.*]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[V0:%.*]], <vscale x 8 x i32> [[V1:%.*]], i32* [[BASE:%.*]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[V0:%.*]], <vscale x 32 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], <vscale x 1 x i8> [[V2:%.*]], <vscale x 1 x i8> [[V3:%.*]], <vscale x 1 x i8> [[V4:%.*]], <vscale x 1 x i8> [[V5:%.*]], <vscale x 1 x i8> [[V6:%.*]], <vscale x 1 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[V0:%.*]], <vscale x 2 x i8> [[V1:%.*]], <vscale x 2 x i8> [[V2:%.*]], <vscale x 2 x i8> [[V3:%.*]], <vscale x 2 x i8> [[V4:%.*]], <vscale x 2 x i8> [[V5:%.*]], <vscale x 2 x i8> [[V6:%.*]], <vscale x 2 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[V0:%.*]], <vscale x 4 x i8> [[V1:%.*]], <vscale x 4 x i8> [[V2:%.*]], <vscale x 4 x i8> [[V3:%.*]], <vscale x 4 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[V0:%.*]], <vscale x 2 x i64> [[V1:%.*]], <vscale x 2 x i64> [[V2:%.*]], <vscale x 2 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[V0:%.*]], <vscale x 4 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[V0:%.*]], <vscale x 8 x i8> [[V1:%.*]], <vscale x 8 x i8> [[V2:%.*]], <vscale x 8 x i8> [[V3:%.*]], <vscale x 8 x i8> [[V4:%.*]], <vscale x 8 x i8> [[V5:%.*]], <vscale x 8 x i8> [[V6:%.*]], <vscale x 8 x i8> [[V7:%.*]], i8* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }

-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[V0:%.*]], <vscale x 1 x i64> [[V1:%.*]], <vscale x 1 x i64> [[V2:%.*]], <vscale x 1 x i64> [[V3:%.*]], <vscale x 1 x i64> [[V4:%.*]], <vscale x 1 x i64> [[V5:%.*]], <vscale x 1 x i64> [[V6:%.*]], <vscale x 1 x i64> [[V7:%.*]], i64* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7,
vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsoxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsoxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsoxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, 
size_t vl) { - return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsoxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsoxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, 
vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg5ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t 
v3, size_t vl) { - return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return 
vsoxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t 
*base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsoxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsoxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsoxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, 
vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8mf2_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t 
bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// 
CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, 
vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei16_v_u8m2_m (vbool4_t mask, 
uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, 
vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsoxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf4_m 
(vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf4_m(mask, 
base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg7ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, 
vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, 
vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[V0:%.*]], <vscale x 4 x i16> [[V1:%.*]], <vscale x 4 x i16> [[V2:%.*]], <vscale x 4 x i16> [[V3:%.*]], <vscale x 4 x i16> [[V4:%.*]], <vscale x 4 x i16> [[V5:%.*]], <vscale x 4 x i16> [[V6:%.*]], <vscale x 4 x i16> [[V7:%.*]], i16* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], <vscale x 16 x i8> [[V2:%.*]], <vscale x 16 x i8> [[V3:%.*]], i8* [[BASE:%.*]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[V0:%.*]], <vscale x 1 x i8> [[V1:%.*]], i8* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[V0:%.*]], <vscale x 8 x i16> [[V1:%.*]], <vscale x 8 x i16> [[V2:%.*]], i16* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
- return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
- return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
- return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
- return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
- return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+ return 
vsoxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsoxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_u32m1_m (vbool32_t 
mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsoxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: 
ret void // -void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void 
test_vsoxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, 
v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsoxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg4ei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsoxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { + return vsoxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsoxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, 
vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsoxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsoxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsoxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, 
vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsoxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return vsoxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsoxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsoxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, 
v0, v1, v2, v3, v4, vl);
+void test_vsoxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+  return vsoxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+  return vsoxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+  return vsoxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+  return vsoxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+  return vsoxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+  return vsoxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
-  return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+  return vsoxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+  return vsoxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_u32mf2_m(mask, base,
bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsoxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsoxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsoxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsoxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsoxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsoxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsoxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsoxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsoxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, vl); +void test_vsoxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsoxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsoxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsoxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, 
vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsoxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsoxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsoxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsoxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsoxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg5ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: 
@test_vsoxseg2ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, 
vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei16_v_u64m2_m (vbool32_t 
mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsoxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t 
v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsoxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsoxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsoxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: 
@test_vsoxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsoxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsoxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsoxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsoxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsoxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsoxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, 
+void test_vsoxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
- return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
- return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
- return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
- return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m1(base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsoxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsoxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1( +// CHECK-RV64-LABEL: 
@test_vsoxseg6ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m2(base, bindex, v0, v1, vl); +void test_vsoxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsoxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei16_v_f16m4(base, bindex, v0, v1, vl); +void test_vsoxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsoxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } 
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsoxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsoxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsoxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsoxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsoxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsoxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsoxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsoxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsoxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsoxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsoxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsoxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsoxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vsoxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsoxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+ return vsoxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsoxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+ return vsoxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsoxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+ return vsoxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsoxseg2ei64_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+ return vsoxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsoxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+ return vsoxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret void
//
-void test_vsoxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsoxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+ return vsoxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
}
-// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsoxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsoxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsoxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsoxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei64_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsoxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsoxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsoxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsoxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsoxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsoxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsoxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsoxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsoxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsoxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsoxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsoxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsoxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsoxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsoxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsoxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsoxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsoxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsoxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsoxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsoxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsoxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsoxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsoxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsoxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsoxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsoxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsoxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsoxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsoxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsoxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsoxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsoxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsoxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsoxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsoxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsoxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsoxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsoxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsoxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsoxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsoxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsoxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsoxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsoxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsoxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsoxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+//
CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16mf2_m(mask, base, 
bindex, v0, v1, v2, v3, vl); +void test_vsoxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, 
v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, 
vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei16_v_f64m1_m (vbool64_t 
mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsoxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], 
[[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, 
vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg7ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei32_v_f64m1_m(mask, base, bindex, 
v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsoxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsoxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsoxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsoxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsoxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, 
size_t vl) { - return vsoxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsoxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsoxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsoxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg5ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsoxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsoxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsoxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg6ei64_v_f16m1_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsoxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsoxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsoxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg7ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsoxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsoxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsoxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg8ei64_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsoxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsoxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsoxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f16m2_m( +// 
CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsoxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsoxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsoxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg3ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsoxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsoxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsoxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsoxseg4ei64_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsoxseg2ei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsoxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsoxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsoxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsoxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); } +
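The vsra.c hunks below show the pattern this patch applies to the masked builtins in these tests: the argument lists of the _m builtin calls are unchanged at the C level, and only the lowered @llvm.riscv.*.mask.* call gains a trailing "i64 1" operand carrying the tail policy. A minimal usage sketch in C, assuming the convention that policy value 1 encodes tail agnostic; the wrapper name shift_masked is illustrative only:

  #include <riscv_vector.h>

  // Masked arithmetic shift right, as exercised by the tests below.
  // Active elements take op1 >> shift element-wise; inactive elements
  // come from maskedoff. After this patch the builtin lowers to
  //   @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64(..., i64 vl, i64 1)
  // where the final "i64 1" is the (assumed tail-agnostic) policy.
  vint8m1_t shift_masked(vbool8_t mask, vint8m1_t maskedoff,
                         vint8m1_t op1, vuint8m1_t shift, size_t vl) {
    return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
  }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c index 250304d..3a55eb5 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsra.c @@ -396,402 +396,799 @@ vint64m8_t test_vsra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) { +vint64m8_t test_vsra_vx_i64m8 (vint64m8_t op1, size_t shift, size_t vl) { return vsra_vx_i64m8(op1, shift, vl); } // CHECK-RV64-LABEL: 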
@test_vsra_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { +vint8mf8_t test_vsra_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) { return vsra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { +vint8mf8_t test_vsra_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) { return vsra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { +vint8mf4_t test_vsra_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) { return vsra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { +vint8mf4_t test_vsra_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) { return vsra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { +vint8mf2_t test_vsra_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) { 
  return vsra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8mf2_t test_vsra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
+vint8mf2_t test_vsra_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vsra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+vint8m1_t test_vsra_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vsra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vsra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
+vint8m1_t test_vsra_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
  return vsra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vsra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+vint8m2_t test_vsra_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vsra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m2_t test_vsra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
+vint8m2_t test_vsra_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vsra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+vint8m4_t test_vsra_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vsra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m4_t test_vsra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
+vint8m4_t test_vsra_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
  return vsra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vsra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+vint8m8_t test_vsra_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vsra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m8_t test_vsra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
+vint8m8_t test_vsra_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
  return vsra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vsra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+vint16mf4_t test_vsra_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
  return vsra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf4_t test_vsra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
+vint16mf4_t test_vsra_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vsra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+vint16mf2_t test_vsra_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
  return vsra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16mf2_t test_vsra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
+vint16mf2_t test_vsra_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vsra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+vint16m1_t test_vsra_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vsra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vsra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
+vint16m1_t test_vsra_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vsra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+vint16m2_t test_vsra_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vsra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m2_t test_vsra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
+vint16m2_t test_vsra_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vsra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+vint16m4_t test_vsra_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vsra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m4_t test_vsra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
+vint16m4_t test_vsra_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vsra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+vint16m8_t test_vsra_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vsra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m8_t test_vsra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
+vint16m8_t test_vsra_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
  return vsra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vsra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+vint32mf2_t test_vsra_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
  return vsra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32mf2_t test_vsra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
+vint32mf2_t test_vsra_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vsra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
+vint32m1_t test_vsra_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vsra_vv_i32m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m1_t test_vsra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
+vint32m1_t test_vsra_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vsra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
+vint32m2_t test_vsra_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vsra_vv_i32m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m2_t test_vsra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
+vint32m2_t test_vsra_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vsra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
+vint32m4_t test_vsra_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vsra_vv_i32m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m4_t test_vsra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
+vint32m4_t test_vsra_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vsra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
+vint32m8_t test_vsra_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vsra_vv_i32m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint32m8_t test_vsra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
+vint32m8_t test_vsra_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) {
  return vsra_vx_i32m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vsra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
+vint64m1_t test_vsra_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vsra_vv_i64m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vsra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
+vint64m1_t test_vsra_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) {
  return vsra_vx_i64m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vsra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
+vint64m2_t test_vsra_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vsra_vv_i64m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vsra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
+vint64m2_t test_vsra_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) {
  return vsra_vx_i64m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vsra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
+vint64m4_t test_vsra_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vsra_vv_i64m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vsra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
+vint64m4_t test_vsra_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
  return vsra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vsra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+vint64m8_t test_vsra_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vsra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vsra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
+vint64m8_t test_vsra_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
  return vsra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsra_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsra_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vsra_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vsra_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vsra_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vsra_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vsra_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vsra_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vsra_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vsra_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vsra_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vsra_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vsra_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vsra_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vsra_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vsra_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vsra_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vsra_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vsra_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vsra_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vsra_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vsra_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vsra_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vsra_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vsra_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vsra_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vsra_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vsra_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vsra_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vsra_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vsra_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vsra_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vsra_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vsra_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vsra_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vsra_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vsra_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vsra_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vsra_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vsra_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vsra_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vsra_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsra_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) {
+  return vsra_vv_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsra_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsra_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vsra_vx_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
index 57ff0e5..4c90517 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsrl.c
@@ -396,403 +396,799 @@ vuint64m8_t test_vsrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vsrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+vuint64m8_t test_vsrl_vx_u64m8 (vuint64m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m8(op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vsrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+vuint8mf8_t test_vsrl_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
  return vsrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vsrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
+vuint8mf8_t test_vsrl_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vsrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+vuint8mf4_t test_vsrl_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
  return vsrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vsrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
+vuint8mf4_t test_vsrl_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vsrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+vuint8mf2_t test_vsrl_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
  return vsrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vsrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
+vuint8mf2_t test_vsrl_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vsrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
+vuint8m1_t test_vsrl_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) {
  return vsrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vsrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
+vuint8m1_t test_vsrl_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vsrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
+vuint8m2_t test_vsrl_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) {
  return vsrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vsrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
+vuint8m2_t test_vsrl_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vsrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
+vuint8m4_t test_vsrl_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) {
  return vsrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vsrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
+vuint8m4_t test_vsrl_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vsrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
+vuint8m8_t test_vsrl_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) {
  return vsrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vsrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
+vuint8m8_t test_vsrl_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vsrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
+vuint16mf4_t test_vsrl_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
  return vsrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vsrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
+vuint16mf4_t test_vsrl_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vsrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
+vuint16mf2_t test_vsrl_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
  return vsrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vsrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
+vuint16mf2_t test_vsrl_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vsrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
+vuint16m1_t test_vsrl_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) {
  return vsrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vsrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
+vuint16m1_t test_vsrl_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vsrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
+vuint16m2_t test_vsrl_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) {
  return vsrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vsrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
+vuint16m2_t test_vsrl_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vsrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
+vuint16m4_t test_vsrl_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) {
  return vsrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vsrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
+vuint16m4_t test_vsrl_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vsrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
+vuint16m8_t test_vsrl_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) {
  return vsrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vsrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
+vuint16m8_t test_vsrl_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vsrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
+vuint32mf2_t test_vsrl_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) {
  return vsrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vsrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
+vuint32mf2_t test_vsrl_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vsrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
+vuint32m1_t test_vsrl_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl) {
  return vsrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vsrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
+vuint32m1_t test_vsrl_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vsrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
+vuint32m2_t test_vsrl_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) {
  return vsrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vsrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
+vuint32m2_t test_vsrl_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vsrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
+vuint32m4_t test_vsrl_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) {
  return vsrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vsrl_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
+vuint32m4_t test_vsrl_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vsrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
+vuint32m8_t test_vsrl_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) {
  return vsrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vsrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
+vuint32m8_t test_vsrl_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vsrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
+vuint64m1_t test_vsrl_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) {
  return vsrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vsrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
+vuint64m1_t test_vsrl_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vsrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
+vuint64m2_t test_vsrl_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) {
  return vsrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vsrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
+vuint64m2_t test_vsrl_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vsrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
+vuint64m4_t test_vsrl_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) {
  return vsrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vsrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
+vuint64m4_t test_vsrl_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vsrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
+vuint64m8_t test_vsrl_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
  return vsrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl);
}

// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vsrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
+vuint64m8_t test_vsrl_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) {
  return vsrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl);
}
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vsrl_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vsrl_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vsrl_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vsrl_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vsrl_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vsrl_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vsrl_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vsrl_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vsrl_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vsrl_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vsrl_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vsrl_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vsrl_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vsrl_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vsrl_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vsrl_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vsrl_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vsrl_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vsrl_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vsrl_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vsrl_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vsrl_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vsrl_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vsrl_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vsrl_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vsrl_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vsrl_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vsrl_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vsrl_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vsrl_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vsrl_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vsrl_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vsrl_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vsrl_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vsrl_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vsrl_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vsrl_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vv_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsrl_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vsrl_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+ return vsrl_vx_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
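As a minimal usage sketch of the tail-policy intrinsics exercised by the tests above (assuming a riscv_vector.h built with this patch, which declares the _mt variants and the VE_TAIL_AGNOSTIC macro seen in the tests; the function name below is hypothetical):

#include <riscv_vector.h>

// Sketch only: shift the active elements of op1 right by 3 bits.
// The trailing argument selects the tail policy; VE_TAIL_AGNOSTIC
// (lowered to the trailing `i64 1` operand in the CHECK lines above)
// means the tail elements of the result may hold any value.
vuint32m1_t shift_active_by_3(vbool32_t mask, vuint32m1_t maskedoff,
                              vuint32m1_t op1, size_t vl) {
  return vsrl_vx_u32m1_mt(mask, maskedoff, op1, 3, vl, VE_TAIL_AGNOSTIC);
}
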
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
index b9d3588..2a2dc76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c
@@ -490,6 +490,66 @@ void test_vsse64_v_u64m8(uint64_t *base, ptrdiff_t bstride, vuint64m8_t value,
   return vsse64_v_u64m8(base, bstride, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf4(_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+ return vsse16_v_f16mf4(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf2(_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+ return vsse16_v_f16mf2(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m1(_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+ return vsse16_v_f16m1(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2(_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+ return vsse16_v_f16m2(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4(_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+ return vsse16_v_f16m4(base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8(_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+ return vsse16_v_f16m8(base, bstride, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
@@ -1073,6 +1133,66 @@ void test_vsse64_v_u64m8_m(vbool8_t mask, uint64_t *base, ptrdiff_t bstride,
   return vsse64_v_u64m8_m(mask, base, bstride, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf4_m(vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
+ return vsse16_v_f16mf4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16mf2_m(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
+ return vsse16_v_f16mf2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m1_m(vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
+ return vsse16_v_f16m1_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m2_m(vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
+ return vsse16_v_f16m2_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m4_m(vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
+ return vsse16_v_f16m4_m(mask, base, bstride, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsse16_v_f16m8_m(vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
+ return vsse16_v_f16m8_m(mask, base, bstride, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
@@ -1167,127 +1287,7 @@ void test_vsse64_v_f64m4_m(vbool16_t mask, double *base, ptrdiff_t bstride,
 // CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsse64_v_f64m8_m(vbool8_t mask, double *base, ptrdiff_t bstride,
-                           vfloat64m8_t value, size_t vl) {
+void test_vsse64_v_f64m8_m (vbool8_t mask, double *base, ptrdiff_t bstride, vfloat64m8_t value, size_t vl) {
   return vsse64_v_f64m8_m(mask, base, bstride, value, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
- return vsse16_v_f16mf4 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
- return vsse16_v_f16mf2 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
- return vsse16_v_f16m1 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
- return vsse16_v_f16m2 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
- return vsse16_v_f16m4 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m8 (_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
- return vsse16_v_f16m8 (base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) {
- return vsse16_v_f16mf4_m (mask, base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) {
- return vsse16_v_f16mf2_m (mask, base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) {
- return vsse16_v_f16m1_m (mask, base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) {
- return vsse16_v_f16m2_m (mask, base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) {
- return vsse16_v_f16m4_m (mask, base, bstride, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsse16_v_f16m8_m (vbool2_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) {
- return vsse16_v_f16m8_m (mask, base, bstride, value, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
index 23f08dd..df1162c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsseg.c
@@ -1555,6 +1555,231 @@ void test_vsseg2e64_v_u64m4 (uint64_t *base, vuint64m4_t v0, vuint64m4_t v1, siz
   return vsseg2e64_v_u64m4(base, v0, v1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg2e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsseg2e16_v_f16mf4(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg3e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsseg3e16_v_f16mf4(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg4e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg5e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg6e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg7e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg8e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg2e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsseg2e16_v_f16mf2(base, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg3e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsseg3e16_v_f16mf2(base, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg4e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg5e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg6e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsseg7e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl);
v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16_v_f16m1(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16_v_f16m1(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16_v_f16m2(base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16_v_f16m2(base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4 (_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16_v_f16m4(base, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[VL:%.*]]) @@ -3364,41 +3589,266 @@ void test_vsseg2e64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t v0, v return vsseg2e64_v_u64m4_m(mask, base, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg2e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsseg2e32_v_f32mf2_m(mask, base, v0, v1, vl); +void test_vsseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg3e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsseg3e32_v_f32mf2_m(mask, base, v0, v1, v2, vl); +void test_vsseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg4e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsseg4e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, vl); +void test_vsseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsseg5e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsseg5e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); -} +void test_vsseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: 
@test_vsseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t 
v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg7e16_v_f16m1_m 
(vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg2e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg2e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsseg2e32_v_f32mf2_m(mask, base, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg3e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg3e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsseg3e32_v_f32mf2_m(mask, base, v0, v1, v2, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsseg4e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg4e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsseg4e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vsseg5e32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsseg5e32_v_f32mf2_m (vbool64_t mask, float *base, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsseg5e32_v_f32mf2_m(mask, base, v0, v1, v2, v3, v4, vl); +} // CHECK-RV64-LABEL: @test_vsseg6e32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: @@ -3625,452 +4075,3 @@ void test_vsseg2e64_v_f64m4_m (vbool16_t mask, double *base, vfloat64m4_t v0, vf return vsseg2e64_v_f64m4_m(mask, base, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsseg2e16_v_f16mf4(base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsseg3e16_v_f16mf4(base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsseg4e16_v_f16mf4(base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsseg5e16_v_f16mf4(base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsseg6e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsseg7e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16mf4 (_Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsseg8e16_v_f16mf4(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsseg2e16_v_f16mf2(base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsseg3e16_v_f16mf2(base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsseg4e16_v_f16mf2(base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsseg5e16_v_f16mf2(base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsseg6e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], 
half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsseg7e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16mf2 (_Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsseg8e16_v_f16mf2(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsseg2e16_v_f16m1(base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsseg3e16_v_f16m1(base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsseg4e16_v_f16m1(base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsseg5e16_v_f16m1(base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsseg6e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, 
vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsseg7e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16m1 (_Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsseg8e16_v_f16m1(base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsseg2e16_v_f16m2(base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsseg3e16_v_f16m2(base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16m2 (_Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsseg4e16_v_f16m2(base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16m4 (_Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsseg2e16_v_f16m4(base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsseg2e16_v_f16mf4_m(mask, base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsseg3e16_v_f16mf4_m(mask, base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, 
vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsseg4e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsseg5e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsseg6e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsseg7e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsseg8e16_v_f16mf4_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsseg2e16_v_f16mf2_m(mask, base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsseg3e16_v_f16mf2_m(mask, base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* 
[[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsseg4e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsseg5e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsseg6e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsseg7e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsseg8e16_v_f16mf2_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsseg2e16_v_f16m1_m(mask, base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsseg3e16_v_f16m1_m(mask, base, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m1_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsseg4e16_v_f16m1_m(mask, base, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg5e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsseg5e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg6e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsseg6e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg7e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsseg7e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg8e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsseg8e16_v_f16m1_m(mask, base, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsseg2e16_v_f16m2_m(mask, base, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vsseg3e16_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return 
vsseg3e16_v_f16m2_m(mask, base, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsseg4e16_v_f16m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg4.mask.nxv8f16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsseg4e16_v_f16m2_m(mask, base, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsseg2e16_v_f16m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsseg2.mask.nxv16f16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret void
-//
-void test_vsseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsseg2e16_v_f16m4_m(mask, base, v0, v1, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
index 5748a79..92e3afe 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssra.c
@@ -399,449 +399,799 @@ vint64m8_t test_vssra_vv_i64m8(vint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vssra.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vssra_vx_i64m8(vint64m8_t op1, size_t shift, size_t vl) {
+vint64m8_t test_vssra_vx_i64m8 (vint64m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m8(op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vssra_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
-                                 vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
+vint8mf8_t test_vssra_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssra_vv_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vssra.mask.nxv1i8.i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vssra_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
-                                 vint8mf8_t op1, size_t shift, size_t vl) {
+vint8mf8_t test_vssra_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vssra_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
-                                 vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
+vint8mf4_t test_vssra_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssra_vv_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vssra.mask.nxv2i8.i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vssra_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
-                                 vint8mf4_t op1, size_t shift, size_t vl) {
+vint8mf4_t test_vssra_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vssra_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
-                                 vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
+vint8mf2_t test_vssra_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssra_vv_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vssra.mask.nxv4i8.i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vssra_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff,
-                                 vint8mf2_t op1, size_t shift, size_t vl) {
+vint8mf2_t test_vssra_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vssra_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
-                               vint8m1_t op1, vuint8m1_t shift, size_t vl) {
+vint8m1_t test_vssra_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl) {
   return vssra_vv_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vssra.mask.nxv8i8.i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vssra_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff,
-                               vint8m1_t op1, size_t shift, size_t vl) {
+vint8m1_t test_vssra_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vssra_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
-                               vint8m2_t op1, vuint8m2_t shift, size_t vl) {
+vint8m2_t test_vssra_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl) {
   return vssra_vv_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vssra.mask.nxv16i8.i64.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vssra_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff,
-                               vint8m2_t op1, size_t shift, size_t vl) {
+vint8m2_t test_vssra_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vssra_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
-                               vint8m4_t op1, vuint8m4_t shift, size_t vl) {
+vint8m4_t test_vssra_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl) {
   return vssra_vv_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vssra.mask.nxv32i8.i64.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vssra_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff,
-                               vint8m4_t op1, size_t shift, size_t vl) {
+vint8m4_t test_vssra_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vssra_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
-                               vint8m8_t op1, vuint8m8_t shift, size_t vl) {
+vint8m8_t test_vssra_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl) {
   return vssra_vv_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vssra.mask.nxv64i8.i64.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vssra_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff,
-                               vint8m8_t op1, size_t shift, size_t vl) {
+vint8m8_t test_vssra_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i8m8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vssra_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint16mf4_t op1, vuint16mf4_t shift,
-                                   size_t vl) {
+vint16mf4_t test_vssra_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl) {
   return vssra_vv_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vssra.mask.nxv1i16.i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vssra_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint16mf4_t op1, size_t shift, size_t vl) {
+vint16mf4_t test_vssra_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16mf4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vssra_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
-                                   vint16mf2_t op1, vuint16mf2_t shift,
-                                   size_t vl) {
+vint16mf2_t test_vssra_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl) {
  return vssra_vv_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vssra.mask.nxv2i16.i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vssra_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
-                                   vint16mf2_t op1, size_t shift, size_t vl) {
+vint16mf2_t test_vssra_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16mf2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vssra_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                 vint16m1_t op1, vuint16m1_t shift, size_t vl) {
+vint16m1_t test_vssra_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl) {
   return vssra_vv_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vssra.mask.nxv4i16.i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vssra_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
-                                 vint16m1_t op1, size_t shift, size_t vl) {
+vint16m1_t test_vssra_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m1_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vssra_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                 vint16m2_t op1, vuint16m2_t shift, size_t vl) {
+vint16m2_t test_vssra_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl) {
   return vssra_vv_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vssra.mask.nxv8i16.i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vssra_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
-                                 vint16m2_t op1, size_t shift, size_t vl) {
+vint16m2_t test_vssra_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m2_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vssra_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                 vint16m4_t op1, vuint16m4_t shift, size_t vl) {
+vint16m4_t test_vssra_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl) {
   return vssra_vv_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vssra.mask.nxv16i16.i64.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vssra_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                 vint16m4_t op1, size_t shift, size_t vl) {
+vint16m4_t test_vssra_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m4_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vssra_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                 vint16m8_t op1, vuint16m8_t shift, size_t vl) {
+vint16m8_t test_vssra_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl) {
   return vssra_vv_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vssra.mask.nxv32i16.i64.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i64 [[SHIFT:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vssra_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                 vint16m8_t op1, size_t shift, size_t vl) {
+vint16m8_t test_vssra_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i16m8_m(mask, maskedoff, op1, shift, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[SHIFT:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vssra_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                   vint32mf2_t op1,
vuint32mf2_t shift, - size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vssra_vv_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vssra_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, size_t shift, size_t vl) { +vint32mf2_t test_vssra_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl) { return vssra_vx_i32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vuint32m1_t shift, size_t vl) { +vint32m1_t test_vssra_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl) { return vssra_vv_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssra_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, size_t shift, size_t vl) { +vint32m1_t test_vssra_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl) { return vssra_vx_i32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vuint32m2_t shift, size_t vl) { +vint32m2_t test_vssra_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl) { return vssra_vv_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssra_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, size_t shift, size_t vl) { +vint32m2_t test_vssra_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl) { return vssra_vx_i32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vuint32m4_t shift, size_t vl) { +vint32m4_t test_vssra_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl) { return vssra_vv_i32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssra_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, size_t shift, size_t vl) { +vint32m4_t test_vssra_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl) { return vssra_vx_i32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vuint32m8_t shift, size_t vl) { +vint32m8_t test_vssra_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl) { return vssra_vv_i32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssra_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, size_t shift, size_t vl) { +vint32m8_t test_vssra_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl) { return vssra_vx_i32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vuint64m1_t shift, size_t vl) { +vint64m1_t test_vssra_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl) { return vssra_vv_i64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssra_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, size_t shift, size_t vl) { +vint64m1_t test_vssra_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl) { return vssra_vx_i64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vuint64m2_t shift, size_t vl) { +vint64m2_t test_vssra_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl) { return vssra_vv_i64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssra_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, size_t shift, size_t vl) { +vint64m2_t test_vssra_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl) { return vssra_vx_i64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssra_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vuint64m4_t shift, size_t vl) { +vint64m4_t test_vssra_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl) { return vssra_vv_i64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssra_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m4_t test_vssra_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-    vint64m4_t op1, size_t shift, size_t vl) {
+vint64m4_t test_vssra_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m4_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssra_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vssra_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-    vint64m8_t op1, vuint64m8_t shift, size_t vl) {
+vint64m8_t test_vssra_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl) {
   return vssra_vv_i64m8_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssra_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vssra_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-    vint64m8_t op1, size_t shift, size_t vl) {
+vint64m8_t test_vssra_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl) {
   return vssra_vx_i64m8_m(mask, maskedoff, op1, shift, vl);
 }
+
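// A minimal usage sketch, not part of the autogenerated tests: the new _mt
// builtins below take the tail policy as an extra trailing argument. The
// tests pass VE_TAIL_AGNOSTIC, which is what lowers to the trailing `i64 1`
// operand in the updated CHECK lines; the companion VE_TAIL_UNDISTURBED (0)
// macro and the helper name `shift_keep_tail` are assumptions made for
// illustration only.

#include <riscv_vector.h>

// Request the tail-undisturbed policy so that tail elements are carried over
// from `maskedoff` instead of being left unspecified.
vint8m1_t shift_keep_tail(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1,
                          size_t shift, size_t vl) {
  return vssra_vx_i8m1_mt(mask, maskedoff, op1, shift, vl,
                          VE_TAIL_UNDISTURBED);
}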
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vssra_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vssra_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vssra_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vssra_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vssra_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vssra_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vssra_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vssra_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vssra_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vssra_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i8m4_mt(
+//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vssra_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vssra_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vssra_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssra.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vssra_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vssra_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vssra_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vssra_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vssra_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vssra_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vssra_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], 
i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vssra_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vssra_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vssra_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) { + return vssra_vv_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vssra_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssra_vx_i64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssra_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t 
test_vssra_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vssra_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vssra_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vssra_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vssra_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) {
+  return vssra_vv_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssra_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssra.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vssra_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t shift, size_t vl, uint8_t ta) {
+  return vssra_vx_i64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
index dfda5f9..ce05307 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssrl.c
@@ -399,464 +399,799 @@ vuint64m8_t test_vssrl_vv_u64m8(vuint64m8_t op1, vuint64m8_t shift, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[SHIFT:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vssrl_vx_u64m8(vuint64m8_t op1, size_t shift, size_t vl) {
+vuint64m8_t test_vssrl_vx_u64m8 (vuint64m8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u64m8(op1, shift, vl);
 }

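// A small illustrative caller, again not part of the generated tests; the
// helper name `masked_shift` is hypothetical. The masked _m intrinsics keep
// their existing C signature, but as the updated CHECK lines below show, they
// now lower with an implicit trailing `i64 1` (tail agnostic) policy operand
// on the @llvm.riscv.vssrl.mask.* calls.

#include <riscv_vector.h>

// Inactive elements come from `maskedoff`; the tail elements are handled
// with the default tail-agnostic policy for the _m form.
vuint8m1_t masked_shift(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1,
                        size_t shift, size_t vl) {
  return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl);
}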
 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vssrl_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-    vuint8mf8_t op1, vuint8mf8_t shift,
-    size_t vl) {
+vuint8mf8_t test_vssrl_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl) {
   return vssrl_vv_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vssrl_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-    vuint8mf8_t op1, size_t shift, size_t vl) {
+vuint8mf8_t test_vssrl_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8mf8_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vssrl_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-    vuint8mf4_t op1, vuint8mf4_t shift,
-    size_t vl) {
+vuint8mf4_t test_vssrl_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl) {
   return vssrl_vv_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vssrl_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-    vuint8mf4_t op1, size_t shift, size_t vl) {
+vuint8mf4_t test_vssrl_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl) {
   return vssrl_vx_u8mf4_m(mask, maskedoff, op1, shift, vl);
 }

 // CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vssrl_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-    vuint8mf2_t op1, vuint8mf2_t shift,
-    size_t vl) {
+vuint8mf2_t test_vssrl_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl) {
   return vssrl_vv_u8mf2_m(mask, maskedoff, op1, shift, vl);
 }
// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vssrl_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, - vuint8mf2_t op1, size_t shift, size_t vl) { +vuint8mf2_t test_vssrl_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl) { return vssrl_vx_u8mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, vuint8m1_t shift, size_t vl) { +vuint8m1_t test_vssrl_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl) { return vssrl_vv_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssrl_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, - vuint8m1_t op1, size_t shift, size_t vl) { +vuint8m1_t test_vssrl_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl) { return vssrl_vx_u8m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, vuint8m2_t shift, size_t vl) { +vuint8m2_t test_vssrl_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl) { return vssrl_vv_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssrl_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, - vuint8m2_t op1, size_t shift, size_t vl) { +vuint8m2_t test_vssrl_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, 
vuint8m2_t op1, size_t shift, size_t vl) { return vssrl_vx_u8m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, vuint8m4_t shift, size_t vl) { +vuint8m4_t test_vssrl_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl) { return vssrl_vv_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssrl_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, - vuint8m4_t op1, size_t shift, size_t vl) { +vuint8m4_t test_vssrl_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl) { return vssrl_vx_u8m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, vuint8m8_t shift, size_t vl) { +vuint8m8_t test_vssrl_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl) { return vssrl_vv_u8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssrl_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, - vuint8m8_t op1, size_t shift, size_t vl) { +vuint8m8_t test_vssrl_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl) { return vssrl_vx_u8m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t 
op1, vuint16mf4_t shift, - size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl) { return vssrl_vv_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vssrl_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, size_t shift, size_t vl) { +vuint16mf4_t test_vssrl_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl) { return vssrl_vx_u16mf4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint16mf2_t shift, - size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl) { return vssrl_vv_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vssrl_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, size_t shift, size_t vl) { +vuint16mf2_t test_vssrl_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl) { return vssrl_vx_u16mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint16m1_t shift, - size_t vl) { +vuint16m1_t test_vssrl_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl) { return vssrl_vv_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssrl_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, size_t shift, size_t vl) { +vuint16m1_t test_vssrl_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl) { return vssrl_vx_u16m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, vuint16m2_t shift, - size_t vl) { +vuint16m2_t test_vssrl_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl) { return vssrl_vv_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssrl_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint16m2_t op1, size_t shift, size_t vl) { +vuint16m2_t test_vssrl_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl) { return vssrl_vx_u16m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, vuint16m4_t shift, - size_t vl) { +vuint16m4_t test_vssrl_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl) { return vssrl_vv_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssrl_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint16m4_t op1, size_t shift, size_t vl) { +vuint16m4_t test_vssrl_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl) { return vssrl_vx_u16m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, vuint16m8_t shift, - size_t vl) { +vuint16m8_t test_vssrl_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl) { return vssrl_vv_u16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssrl_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint16m8_t op1, size_t shift, size_t vl) { +vuint16m8_t test_vssrl_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl) { return vssrl_vx_u16m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, vuint32mf2_t shift, - size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl) { return vssrl_vv_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vssrl_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint32mf2_t op1, size_t shift, size_t vl) { +vuint32mf2_t test_vssrl_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl) { return vssrl_vx_u32mf2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, vuint32m1_t shift, - size_t vl) { +vuint32m1_t test_vssrl_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, 
vuint32m1_t shift, size_t vl) { return vssrl_vv_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssrl_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint32m1_t op1, size_t shift, size_t vl) { +vuint32m1_t test_vssrl_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl) { return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, vuint32m2_t shift, - size_t vl) { +vuint32m2_t test_vssrl_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl) { return vssrl_vv_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssrl_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint32m2_t op1, size_t shift, size_t vl) { +vuint32m2_t test_vssrl_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl) { return vssrl_vx_u32m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint32m4_t op1, vuint32m4_t shift, - size_t vl) { +vuint32m4_t test_vssrl_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl) { return vssrl_vv_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssrl_vx_u32m4_m(vbool8_t mask, 
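
For context, a minimal caller-side sketch of the `_m` interface these tests exercise; the C signature is unchanged by this patch, and the same call now simply lowers with the trailing `i64 1` (tail agnostic) operand visible in the updated CHECK lines. The helper name and its framing are hypothetical, not part of the patch:

#include <stddef.h>
#include <riscv_vector.h>

// Scaling (rounding) logical right shift on the active elements only;
// inactive elements are taken from maskedoff, and the builtin itself
// selects the tail policy.
vuint32m1_t shift_active_u32m1(vbool32_t mask, vuint32m1_t maskedoff,
                               vuint32m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u32m1_m(mask, maskedoff, op1, shift, vl);
}
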
vuint32m4_t maskedoff, - vuint32m4_t op1, size_t shift, size_t vl) { +vuint32m4_t test_vssrl_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl) { return vssrl_vx_u32m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, vuint32m8_t shift, - size_t vl) { +vuint32m8_t test_vssrl_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl) { return vssrl_vv_u32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssrl_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint32m8_t op1, size_t shift, size_t vl) { +vuint32m8_t test_vssrl_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl) { return vssrl_vx_u32m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, vuint64m1_t shift, - size_t vl) { +vuint64m1_t test_vssrl_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl) { return vssrl_vv_u64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssrl_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint64m1_t op1, size_t shift, size_t vl) { +vuint64m1_t test_vssrl_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl) { return vssrl_vx_u64m1_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, vuint64m2_t shift, - size_t vl) { +vuint64m2_t test_vssrl_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl) { return vssrl_vv_u64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssrl_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint64m2_t op1, size_t shift, size_t vl) { +vuint64m2_t test_vssrl_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl) { return vssrl_vx_u64m2_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, vuint64m4_t shift, - size_t vl) { +vuint64m4_t test_vssrl_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl) { return vssrl_vv_u64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssrl_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint64m4_t op1, size_t shift, size_t vl) { +vuint64m4_t test_vssrl_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl) { return vssrl_vx_u64m4_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, vuint64m8_t shift, - size_t vl) { +vuint64m8_t test_vssrl_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl) { return vssrl_vv_u64m8_m(mask, maskedoff, op1, shift, vl); } // CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssrl_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint64m8_t op1, size_t shift, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl) { return vssrl_vx_u64m8_m(mask, maskedoff, op1, shift, vl); } + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vssrl_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8mf8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vssrl_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vssrl_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return 
vssrl_vx_u8mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vssrl_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vssrl_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssrl_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vssrl_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv64i8.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssrl_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u8m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssrl_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16mf4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssrl_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssrl_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssrl_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssrl_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv32i16.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssrl_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u16m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vssrl_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u32mf2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vssrl_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u32m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vssrl_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u32m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vssrl_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u32m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssrl.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv16i32.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vssrl_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u32m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vssrl_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u64m1_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vssrl_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u64m2_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv4i64.i64.i64( 
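
The new `_mt` builtins instead make the policy an explicit trailing argument. A usage sketch, assuming riscv_vector.h also provides VE_TAIL_UNDISTURBED as the counterpart of the VE_TAIL_AGNOSTIC constant used throughout these tests; the helper name is hypothetical:

#include <stddef.h>
#include <riscv_vector.h>

// Request tail undisturbed so the tail elements of maskedoff survive the
// operation; passing VE_TAIL_AGNOSTIC here instead would yield the `i64 1`
// policy operand that the CHECK lines above verify.
vuint64m1_t shift_keep_tail_u64m1(vbool64_t mask, vuint64m1_t maskedoff,
                                  vuint64m1_t op1, size_t shift, size_t vl) {
  return vssrl_vx_u64m1_mt(mask, maskedoff, op1, shift, vl,
                           VE_TAIL_UNDISTURBED);
}
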
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vssrl_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u64m4_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t shift, size_t vl, uint8_t ta) { + return vssrl_vv_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssrl_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssrl.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[SHIFT:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vssrl_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t shift, size_t vl, uint8_t ta) { + return vssrl_vx_u64m8_mt(mask, maskedoff, op1, shift, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c index 5e8dd8b..5e645fd 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssseg.c @@ -1555,6 +1555,231 @@ void test_vssseg2e64_v_u64m4 (uint64_t *base, ptrdiff_t bstride, vuint64m4_t v0, return vssseg2e64_v_u64m4(base, bstride, v0, v1, vl); } +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, 
vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
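
These vssseg hunks extend the strided-segment-store tests to _Float16 element types. As a usage sketch of the two-field form being tested, with hypothetical names (an interleaved {re, im} layout is just one plausible use):

#include <stddef.h>
#include <riscv_vector.h>

// Store re/im pairs as consecutive f16 fields of each segment; bstride is
// the byte distance between the starts of consecutive segments.
void store_complex_f16(_Float16 *out, vfloat16mf4_t re, vfloat16mf4_t im,
                       size_t vl) {
  vssseg2e16_v_f16mf4(out, 2 * (ptrdiff_t)sizeof(_Float16), re, im, vl);
}
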
[[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16_v_f16m1(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl); +} + +// 
CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vssseg7e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vssseg2e16_v_f16m2(base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t 
v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vssseg2e16_v_f16m4(base, bstride, v0, v1, vl); +} + // CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) @@ -3364,41 +3589,266 @@ void test_vssseg2e64_v_u64m4_m (vbool16_t mask, uint64_t *base, ptrdiff_t bstrid return vssseg2e64_v_u64m4_m(mask, base, bstride, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg2e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vssseg2e32_v_f32mf2_m(mask, base, bstride, v0, v1, vl); +void test_vssseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg3e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vssseg3e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, vl); +void test_vssseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg4e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vssseg4e32_v_f32mf2_m(mask, base, 
bstride, v0, v1, v2, v3, vl); +void test_vssseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vssseg5e32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vssseg5e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vssseg5e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); -} +void test_vssseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], 
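
And the masked form, mirroring test_vssseg2e16_v_f16mf4_m above: only segments whose mask bit is set are written, so unselected parts of the buffer are left untouched. A sketch with hypothetical names:

#include <stddef.h>
#include <riscv_vector.h>

// Write {re, im} segments only where the mask bit is set; bstride is the
// byte distance between consecutive segments, as in the unmasked sketch.
void store_complex_f16_masked(vbool64_t mask, _Float16 *out,
                              vfloat16mf4_t re, vfloat16mf4_t im,
                              size_t vl) {
  vssseg2e16_v_f16mf4_m(mask, out, 2 * (ptrdiff_t)sizeof(_Float16),
                        re, im, vl);
}
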
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vssseg5e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg8e16_v_f16mf2_m 
(vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); +} + +// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vssseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, 
size_t vl) {
+ return vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg2e32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg2e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vssseg2e32_v_f32mf2_m(mask, base, bstride, v0, v1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg3e32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg3e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vssseg3e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg4e32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg4e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vssseg4e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vssseg5e32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vssseg5e32_v_f32mf2_m (vbool64_t mask, float *base, ptrdiff_t bstride, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vssseg5e32_v_f32mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl);
+}
 // CHECK-RV64-LABEL: @test_vssseg6e32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
@@ -3625,452 +4075,3 @@ void test_vssseg2e64_v_f64m4_m (vbool16_t mask, double *base, ptrdiff_t bstride,
 return vssseg2e64_v_f64m4_m(mask, base, bstride, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vssseg2e16_v_f16mf4(base, bstride, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg3e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vssseg3e16_v_f16mf4(base, bstride, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg4e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vssseg4e16_v_f16mf4(base, bstride, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg5e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vssseg5e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL:
@test_vssseg6e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg6e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vssseg6e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg7e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vssseg7e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg8e16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vssseg8e16_v_f16mf4(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vssseg2e16_v_f16mf2(base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vssseg3e16_v_f16mf2(base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vssseg4e16_v_f16mf2(base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg5e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vssseg5e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg6e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vssseg6e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg7e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vssseg7e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg8e16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vssseg8e16_v_f16mf2(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vssseg2e16_v_f16m1(base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vssseg3e16_v_f16m1(base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vssseg4e16_v_f16m1(base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg5e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vssseg5e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg6e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vssseg6e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg7e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vssseg7e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg8e16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vssseg8e16_v_f16m1(base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vssseg2e16_v_f16m2(base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vssseg3e16_v_f16m2(base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vssseg4e16_v_f16m2(base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: 
@test_vssseg2e16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vssseg2e16_v_f16m4(base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vssseg2e16_v_f16mf4_m(mask, base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vssseg3e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vssseg4e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg5e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vssseg5e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg6e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vssseg6e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg7e16_v_f16mf4_m (vbool64_t mask, _Float16 
*base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vssseg7e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv1f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg8e16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vssseg8e16_v_f16mf4_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vssseg2e16_v_f16mf2_m(mask, base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vssseg3e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vssseg4e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg5e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vssseg5e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg6e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vssseg6e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg7e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vssseg7e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv2f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg8e16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vssseg8e16_v_f16mf2_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg2e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vssseg2e16_v_f16m1_m(mask, base, bstride, v0, v1, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg3e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vssseg3e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg4e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vssseg4e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, vl); -} - -// CHECK-RV64-LABEL: @test_vssseg5e16_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg5.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vssseg5e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vssseg5e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, 
v3, v4, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg6e16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg6.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg6e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vssseg6e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg7e16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg7.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg7e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vssseg7e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg8e16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg8.mask.nxv4f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg8e16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vssseg8e16_v_f16m1_m(mask, base, bstride, v0, v1, v2, v3, v4, v5, v6, v7, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vssseg2e16_v_f16m2_m(mask, base, bstride, v0, v1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg3e16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg3.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg3e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vssseg3e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg4e16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg4.mask.nxv8f16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg4e16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vssseg4e16_v_f16m2_m(mask, base, bstride, v0, v1, v2, v3, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vssseg2e16_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vssseg2.mask.nxv16f16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vssseg2e16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vssseg2e16_v_f16m4_m(mask, base, bstride, v0, v1, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
index d0cfc7f..143e816 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vssub.c
@@ -795,910 +795,1591 @@ vuint64m8_t test_vssubu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vssubu_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
 return vssubu_vx_u64m8(op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vssub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
- vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint8mf8_t test_vssub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
 return vssub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf8_t test_vssub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff,
- vint8mf8_t op1, int8_t op2, size_t vl) {
+vint8mf8_t test_vssub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
 return vssub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8mf4_t test_vssub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff,
- vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint8mf4_t test_vssub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
 return vssub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vssub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vssub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vssub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vssub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vssub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vssub_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, - vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vssub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vssub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vssub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, - size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vssub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vssub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vssub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vssub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vssub_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: 
@test_vssub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
-                                 vint16m4_t op1, int16_t op2, size_t vl) {
+vint16m4_t test_vssub_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                 vint16m8_t op1, vint16m8_t op2, size_t vl) {
+vint16m8_t test_vssub_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
   return vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
-                                 vint16m8_t op1, int16_t op2, size_t vl) {
+vint16m8_t test_vssub_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
   return vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vssub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                   vint32mf2_t op1, vint32mf2_t op2,
-                                   size_t vl) {
+vint32mf2_t test_vssub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vssub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32mf2_t test_vssub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
-                                   vint32mf2_t op1, int32_t op2, size_t vl) {
+vint32mf2_t test_vssub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                 vint32m1_t op1, vint32m1_t op2, size_t vl) {
+vint32m1_t test_vssub_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
-                                 vint32m1_t op1, int32_t op2, size_t vl) {
+vint32m1_t test_vssub_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                 vint32m2_t op1, vint32m2_t op2, size_t vl) {
+vint32m2_t test_vssub_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
   return vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
-                                 vint32m2_t op1, int32_t op2, size_t vl) {
+vint32m2_t test_vssub_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                 vint32m4_t op1, vint32m4_t op2, size_t vl) {
+vint32m4_t test_vssub_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
   return vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
-                                 vint32m4_t op1, int32_t op2, size_t vl) {
+vint32m4_t test_vssub_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-                                 vint32m8_t op1, vint32m8_t op2, size_t vl) {
+vint32m8_t test_vssub_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
   return vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
-                                 vint32m8_t op1, int32_t op2, size_t vl) {
+vint32m8_t test_vssub_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
   return vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-                                 vint64m1_t op1, vint64m1_t op2, size_t vl) {
+vint64m1_t test_vssub_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
   return vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
-                                 vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vssub_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vssub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-                                 vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vssub_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
   return vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
-                                 vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vssub_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                 vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vssub_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
-                                 vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vssub_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                 vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vssub_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
-                                 vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vssub_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
   return vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf8_t test_vssubu_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                   vuint8mf8_t op1, vuint8mf8_t op2,
-                                   size_t vl) {
+vuint8mf8_t test_vssubu_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vssubu_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf8_t test_vssubu_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
-                                   vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vssubu_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf4_t test_vssubu_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                   vuint8mf4_t op1, vuint8mf4_t op2,
-                                   size_t vl) {
+vuint8mf4_t test_vssubu_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vssubu_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf4_t test_vssubu_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
-                                   vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vssubu_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf2_t test_vssubu_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                   vuint8mf2_t op1, vuint8mf2_t op2,
-                                   size_t vl) {
+vuint8mf2_t test_vssubu_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vssubu_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8mf2_t test_vssubu_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
-                                   vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vssubu_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                 vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vssubu_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
-                                 vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vssubu_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                 vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vssubu_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
-                                 vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vssubu_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                 vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vssubu_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
-                                 vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vssubu_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                 vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vssubu_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
-                                 vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vssubu_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vssubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                     vuint16mf4_t op1, vuint16mf4_t op2,
-                                     size_t vl) {
+vuint16mf4_t test_vssubu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vssubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf4_t test_vssubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
-                                     vuint16mf4_t op1, uint16_t op2,
-                                     size_t vl) {
+vuint16mf4_t test_vssubu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vssubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                     vuint16mf2_t op1, vuint16mf2_t op2,
-                                     size_t vl) {
+vuint16mf2_t test_vssubu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vssubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16mf2_t test_vssubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
-                                     vuint16mf2_t op1, uint16_t op2,
-                                     size_t vl) {
+vuint16mf2_t test_vssubu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                   vuint16m1_t op1, vuint16m1_t op2,
-                                   size_t vl) {
+vuint16m1_t test_vssubu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
-                                   vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vssubu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                   vuint16m2_t op1, vuint16m2_t op2,
-                                   size_t vl) {
+vuint16m2_t test_vssubu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
-                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vssubu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                   vuint16m4_t op1, vuint16m4_t op2,
-                                   size_t vl) {
+vuint16m4_t test_vssubu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
-                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vssubu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                   vuint16m8_t op1, vuint16m8_t op2,
-                                   size_t vl) {
+vuint16m8_t test_vssubu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
-                                   vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vssubu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vssubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                     vuint32mf2_t op1, vuint32mf2_t op2,
-                                     size_t vl) {
+vuint32mf2_t test_vssubu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vssubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32mf2_t test_vssubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
-                                     vuint32mf2_t op1, uint32_t op2,
-                                     size_t vl) {
+vuint32mf2_t test_vssubu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                   vuint32m1_t op1, vuint32m1_t op2,
-                                   size_t vl) {
+vuint32m1_t test_vssubu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
-                                   vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vssubu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                   vuint32m2_t op1, vuint32m2_t op2,
-                                   size_t vl) {
+vuint32m2_t test_vssubu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
-                                   vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vssubu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                   vuint32m4_t op1, vuint32m4_t op2,
-                                   size_t vl) {
+vuint32m4_t test_vssubu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
-                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vssubu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                   vuint32m8_t op1, vuint32m8_t op2,
-                                   size_t vl) {
+vuint32m8_t test_vssubu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
-                                   vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vssubu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                   vuint64m1_t op1, vuint64m1_t op2,
-                                   size_t vl) {
+vuint64m1_t test_vssubu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
-                                   vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vssubu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                   vuint64m2_t op1, vuint64m2_t op2,
-                                   size_t vl) {
+vuint64m2_t test_vssubu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
-                                   vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vssubu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                   vuint64m4_t op1, vuint64m4_t op2,
-                                   size_t vl) {
+vuint64m4_t test_vssubu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
-                                   vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vssubu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, vuint64m8_t op2,
-                                   size_t vl) {
+vuint64m8_t test_vssubu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
-vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
-                                   vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vssubu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vssub_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf8_t test_vssub_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vssub_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf4_t test_vssub_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vssub_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8mf2_t test_vssub_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vssub_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m1_t test_vssub_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vssub_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m2_t test_vssub_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vssub_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m4_t test_vssub_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vssub_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i8m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint8m8_t test_vssub_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vssub_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vssub_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vssub_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf2_t test_vssub_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vssub_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m1_t test_vssub_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vssub_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m2_t test_vssub_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vssub_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m4_t test_vssub_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vssub_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16m8_t test_vssub_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vssub_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32mf2_t test_vssub_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vssub_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m1_t test_vssub_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vssub_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m2_t test_vssub_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vssub_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m4_t test_vssub_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vssub_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint32m8_t test_vssub_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vssub_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m1_t test_vssub_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vssub_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m2_t test_vssub_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vssub_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m4_t test_vssub_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vv_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vssub_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vssub_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssub_vx_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint64m8_t test_vssub_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vssub_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf8_t test_vssubu_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf4_t test_vssubu_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8mf2_t test_vssubu_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vssubu_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m1_t test_vssubu_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vssubu_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u8m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vuint8m2_t test_vssubu_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vssubu_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u8m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]],
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vssubu_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u8m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vssubu_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vssubu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vssubu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint16mf2_t test_vssubu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vssubu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vssubu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vssubu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vssubu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vssubu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vssubu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vssubu_vv_u16m8_mt (vbool2_t 
mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vssubu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vssubu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vssubu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vssubu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vssubu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vssubu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vssubu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vssubu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vssubu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vssubu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vssubu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vssubu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vssubu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vssubu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vssubu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vssubu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vv_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssubu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vssubu_vx_u64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vssubu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) {
+ return vssubu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
index 599da0b..d7590d3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsub.c
@@ -792,798 +792,1591 @@ vuint64m8_t test_vsub_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vsub_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vsub_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m8(op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vsub_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vsub_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vsub_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vsub_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vsub_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vsub_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vsub_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vsub_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vsub_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vsub_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vsub_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vsub_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vsub_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vsub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vsub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vsub_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vsub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vsub_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vsub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vsub_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vsub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vsub_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vsub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vsub_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vsub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vsub_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vsub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vsub_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vsub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vsub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vsub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vsub_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vsub_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vsub_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vsub_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vsub_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vsub_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vsub_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vsub_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vsub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vsub_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vsub_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vsub_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vsub_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vsub_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vsub_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vsub_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i32m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vsub_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vsub_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { +vint64m1_t test_vsub_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { return vsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { +vint64m2_t test_vsub_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { return vsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { +vint64m2_t test_vsub_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { return vsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: @test_vsub_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m4_t test_vsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vsub_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
   return vsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m4_t test_vsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vsub_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vsub_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
   return vsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vsub_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
   return vsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vsub_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vsub_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf8_t test_vsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vsub_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vsub_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vsub_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf4_t test_vsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vsub_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vsub_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vsub_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8mf2_t test_vsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vsub_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vsub_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vsub_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m1_t test_vsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vsub_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m2_t test_vsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vsub_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vsub_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m2_t test_vsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vsub_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m4_t test_vsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vsub_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vsub_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m4_t test_vsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vsub_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m8_t test_vsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vsub_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
   return vsub_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u8m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint8m8_t test_vsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vsub_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
   return vsub_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf4_t test_vsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vsub_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vsub_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf4_t test_vsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vsub_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf2_t test_vsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vsub_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vsub_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16mf2_t test_vsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vsub_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m1_t test_vsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vsub_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vsub_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m1_t test_vsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vsub_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m2_t test_vsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vsub_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vsub_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m2_t test_vsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vsub_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m4_t test_vsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vsub_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vsub_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m4_t test_vsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vsub_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m8_t test_vsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vsub_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
   return vsub_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint16m8_t test_vsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vsub_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
   return vsub_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vsub_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vsub_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32mf2_t test_vsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vsub_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m1_t test_vsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vsub_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vsub_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m1_t test_vsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vsub_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m2_t test_vsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vsub_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vsub_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m2_t test_vsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vsub_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m4_t test_vsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vsub_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vsub_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m4_t test_vsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vsub_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m8_t test_vsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vsub_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
   return vsub_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint32m8_t test_vsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vsub_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
   return vsub_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m1_t test_vsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vsub_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
   return vsub_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m1_t test_vsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vsub_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m2_t test_vsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vsub_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
   return vsub_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m2_t test_vsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vsub_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vsub_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
   return vsub_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vsub_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vsub_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
   return vsub_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vsub_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vsub_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
   return vsub_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsub_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vsub_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vsub_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vsub_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vsub_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vsub_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vsub_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vsub_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vsub_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vsub_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vsub_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vsub_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vsub_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vsub_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vsub_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vsub_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vsub_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vsub_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vsub_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vsub_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vsub_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vsub_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vsub_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vsub_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vsub_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vsub_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vsub_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vsub_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vsub_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vsub_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vsub_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vsub_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vsub_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vsub_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vsub_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vsub_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vsub_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vsub_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vsub_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vsub_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vsub_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vsub_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsub_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vsub_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vsub_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vsub_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vsub_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vsub_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vsub_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vsub_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vsub_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vsub_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vsub_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vsub_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vsub_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vsub_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vsub_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vsub_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vsub_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vsub_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vsub_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vsub_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vsub_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vsub_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vsub_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vsub_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vsub_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vsub_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vsub_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vsub_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vsub_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vsub_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vsub_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vsub_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vsub_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vsub_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vsub_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vsub_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vsub_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vsub_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vsub_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vv_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vsub_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) {
+  return vsub_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vsub_vx_u64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vsub_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsub_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vsub_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vsub_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsub_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) { + return vsub_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vsub_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsub_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) { + return vsub_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vsub_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsub.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vsub_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vsub_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c index ae5675f..2ad619c 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c @@ -1722,6 +1722,216 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t 
@@ -1722,6 +1722,216 @@ void test_vsuxei64_v_u64m8(uint64_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u64m8(base, bindex, value, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf4(_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei8_v_f16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf2(_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei8_v_f16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m1(_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei8_v_f16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m2(_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei8_v_f16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m4(_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei8_v_f16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m8(_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8_v_f16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf4(_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei16_v_f16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf2(_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei16_v_f16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m1(_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei16_v_f16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m2(_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei16_v_f16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m4(_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei16_v_f16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m8(_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16_v_f16m8(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf4(_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei32_v_f16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf2(_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei32_v_f16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m1(_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei32_v_f16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m2(_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei32_v_f16m2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m4(_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei32_v_f16m4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf4(_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei64_v_f16mf4(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf2(_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei64_v_f16mf2(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m1(_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei64_v_f16m1(base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m2(_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei64_v_f16m2(base, bindex, value, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -3841,112 +4051,322 @@ void test_vsuxei64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t bindex,
   return vsuxei64_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void
test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, - vfloat32mf2_t value, size_t vl) { - return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei8_v_f16mf4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, - vfloat32m1_t value, size_t vl) { - return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei8_v_f16mf2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, - vfloat32m2_t value, size_t vl) { - return vsuxei8_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei8_v_f16m1_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, - vfloat32m4_t value, size_t vl) { - return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei8_v_f16m2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, - vfloat32m8_t value, size_t vl) { - return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei8_v_f16m4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, - vfloat32mf2_t value, size_t vl) { - return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl); +void test_vsuxei8_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei8_v_f16m8_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, - vfloat32m1_t value, size_t vl) { - return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei16_v_f16mf4_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, - vfloat32m2_t value, size_t vl) { - return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei16_v_f16mf2_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * 
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, - vfloat32m4_t value, size_t vl) { - return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl); +void test_vsuxei16_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei16_v_f16m1_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, +void test_vsuxei16_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei16_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei16_v_f16m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f16m8_m(vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsuxei16_v_f16m8_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei32_v_f16mf4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t 
value, size_t vl) { + return vsuxei32_v_f16mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei32_v_f16m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsuxei32_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei32_v_f16m4_m(vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsuxei32_v_f16m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf4_m(vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsuxei64_v_f16mf4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16mf2_m(vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsuxei64_v_f16mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m1_m(vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsuxei64_v_f16m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei64_v_f16m2_m(vbool8_t mask, _Float16 *base, vuint64m8_t bindex, 
vfloat16m2_t value, size_t vl) { + return vsuxei64_v_f16m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f32mf2_m(vbool64_t mask, float *base, vuint8mf8_t bindex, + vfloat32mf2_t value, size_t vl) { + return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f32m1_m(vbool32_t mask, float *base, vuint8mf4_t bindex, + vfloat32m1_t value, size_t vl) { + return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f32m2_m(vbool16_t mask, float *base, vuint8mf2_t bindex, + vfloat32m2_t value, size_t vl) { + return vsuxei8_v_f32m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f32m4_m(vbool8_t mask, float *base, vuint8m1_t bindex, + vfloat32m4_t value, size_t vl) { + return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei8_v_f32m8_m(vbool4_t mask, float *base, vuint8m2_t bindex, + vfloat32m8_t value, size_t vl) { + return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f32mf2_m(vbool64_t mask, float *base, vuint16mf4_t bindex, + vfloat32mf2_t value, size_t vl) { + return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f32m1_m(vbool32_t mask, float *base, vuint16mf2_t bindex, + vfloat32m1_t 
value, size_t vl) { + return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f32m2_m(vbool16_t mask, float *base, vuint16m1_t bindex, + vfloat32m2_t value, size_t vl) { + return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f32m4_m(vbool8_t mask, float *base, vuint16m2_t bindex, + vfloat32m4_t value, size_t vl) { + return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsuxei16_v_f32m8_m(vbool4_t mask, float *base, vuint16m4_t bindex, vfloat32m8_t value, size_t vl) { return vsuxei16_v_f32m8_m(mask, base, bindex, value, vl); } @@ -4221,427 +4641,7 @@ void test_vsuxei64_v_f64m4_m(vbool16_t mask, double *base, vuint64m4_t bindex, // CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxei64_v_f64m8_m(vbool8_t mask, double *base, vuint64m8_t bindex, - vfloat64m8_t value, size_t vl) { +void test_vsuxei64_v_f64m8_m (vbool8_t mask, double *base, vuint64m8_t bindex, vfloat64m8_t value, size_t vl) { return vsuxei64_v_f64m8_m(mask, base, bindex, value, vl); } -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei8_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei8_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei8_v_f16m1 (base, bindex, value, vl); -} - -// 
CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei8_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei8_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei8_v_f16m8 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei16_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei16_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei16_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei16_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void 
test_vsuxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei16_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { - return vsuxei16_v_f16m8 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei32_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei32_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei32_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei32_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { - return vsuxei32_v_f16m4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei64_v_f16mf4 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei64_v_f16mf2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei64_v_f16m1 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei64_v_f16m2 (base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { - return vsuxei8_v_f16mf4_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { - return vsuxei8_v_f16mf2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { - return vsuxei8_v_f16m1_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret void -// -void test_vsuxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { - return vsuxei8_v_f16m2_m (mask, base, bindex, value, vl); -} - -// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei8_v_f16m4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei8_v_f16m8_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei16_v_f16mf4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei16_v_f16mf2_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei16_v_f16m1_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei16_v_f16m2_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei16_v_f16m4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
- return vsuxei16_v_f16m8_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei32_v_f16mf4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei32_v_f16mf2_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei32_v_f16m1_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei32_v_f16m2_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
- return vsuxei32_v_f16m4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
- return vsuxei64_v_f16mf4_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
- return vsuxei64_v_f16mf2_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
- return vsuxei64_v_f16m1_m (mask, base, bindex, value, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret void
-//
-void test_vsuxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
- return vsuxei64_v_f16m2_m (mask, base, bindex, value, vl);
-}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
index 55bc69a..6f49904 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxseg.c
@@ -6091,9956 +6091,9957 @@ void test_vsuxseg2ei64_v_u64m4 (uint64_t *base, vuint64m4_t bindex, vuint64m4_t
 return vsuxseg2ei64_v_u64m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], <vscale x 4 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64(<vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], <vscale x 4 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void
-void test_vsuxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], <vscale x 4 x half> [[V4:%.*]], <vscale x 4 x half> [[V5:%.*]], <vscale x 4 x half> [[V6:%.*]], <vscale x 4 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[V0:%.*]], <vscale x 8 x half> [[V1:%.*]], <vscale x 8 x half> [[V2:%.*]], <vscale x 8 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], <vscale x 2 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[V0:%.*]], <vscale x 16 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64(<vscale x 4 x float> [[V0:%.*]], <vscale x 4 x float> [[V1:%.*]], <vscale x 4 x float> [[V2:%.*]], <vscale x 4 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64(<vscale x 8 x float> [[V0:%.*]], <vscale x 8 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+ return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+ return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+ return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[V0:%.*]], <vscale x 1 x half> [[V1:%.*]], <vscale x 1 x half> [[V2:%.*]], <vscale x 1 x half> [[V3:%.*]], <vscale x 1 x half> [[V4:%.*]], <vscale x 1 x half> [[V5:%.*]], <vscale x 1 x half> [[V6:%.*]], <vscale x 1 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+ return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64(<vscale x 1 x float> [[V0:%.*]], <vscale x 1 x float> [[V1:%.*]], <vscale x 1 x float> [[V2:%.*]], <vscale x 1 x float> [[V3:%.*]], <vscale x 1 x float> [[V4:%.*]], <vscale x 1 x float> [[V5:%.*]], <vscale x 1 x float> [[V6:%.*]], <vscale x 1 x float> [[V7:%.*]], float* [[BASE:%.*]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+ return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl);
+void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+ return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+ return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[V0:%.*]], <vscale x 2 x half> [[V1:%.*]], <vscale x 2 x half> [[V2:%.*]], <vscale x 2 x half> [[V3:%.*]], <vscale x 2 x half> [[V4:%.*]], <vscale x 2 x half> [[V5:%.*]], <vscale x 2 x half> [[V6:%.*]], <vscale x 2 x half> [[V7:%.*]], half* [[BASE:%.*]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+ return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64(<vscale x 2 x float> [[V0:%.*]], <vscale x 2 x float> [[V1:%.*]], <vscale x 2 x float> [[V2:%.*]], <vscale x 2 x float> [[V3:%.*]], <vscale x 2 x float> [[V4:%.*]], <vscale x 2 x float> [[V5:%.*]], <vscale x 2 x float> [[V6:%.*]], float* [[BASE:%.*]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[V0:%.*]], <vscale x 4 x half> [[V1:%.*]], <vscale x 4 x half> [[V2:%.*]], <vscale x 4 x half> [[V3:%.*]], half* [[BASE:%.*]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f16m1(base, bindex, v0,
v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t 
vl) { - return vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, 
vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, 
vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, 
vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], 
[[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_f32mf2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg8ei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_f32mf2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2(base, bindex, v0, 
v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f32mf2 (float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei8_v_f32m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei8_v_f32m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei8_v_f32m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg8ei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f32m1 (float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei8_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei8_v_f32m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, 
vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f32m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f32m2 (float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32m4 (float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f32m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32mf2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f32mf2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_f32mf2 (float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f32m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_f32m1 (float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f32m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_f32m2 (float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_f32m4 (float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f32m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32mf2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32mf2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei32_v_f32mf2 (float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f32m1 (float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f32m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f32m2 (float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_f32m4 (float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f32m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32mf2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32mf2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei64_v_f32mf2 (float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f32mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei64_v_f32m1 (float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f32m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f32m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32m2 (float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f32m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f32m4 (float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f32m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f64m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg5ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg6ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei8_v_f64m1 (double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m2(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f64m2(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f64m2 (double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_f64m2(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_f64m4 (double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f64m4(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f64m1(base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f64m1(base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f64m1(base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg5ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg6ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei16_v_f64m1 (double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7,
vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei16_v_f64m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei16_v_f64m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei16_v_f64m2 (double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei16_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, 
vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei16_v_f64m4 (double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei16_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei32_v_f64m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei32_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t 
bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei32_v_f64m1 (double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei32_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei32_v_f64m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei32_v_f64m2 (double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei32_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( 
[[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei32_v_f64m4 (double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei32_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m1(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { + return vsuxseg3ei64_v_f64m1(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, 
vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { + return vsuxseg4ei64_v_f64m1(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { + return vsuxseg5ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { + return vsuxseg6ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { + return vsuxseg7ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_f64m1 (double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { + return vsuxseg8ei64_v_f64m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m2(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { + return vsuxseg3ei64_v_f64m2(base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_f64m2 (double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { + return vsuxseg4ei64_v_f64m2(base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_f64m4 (double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { + return vsuxseg2ei64_v_f64m4(base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei8_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return 
vsuxseg7ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei8_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint8mf8_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei8_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei8_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], 
[[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, 
vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei8_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint8mf4_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei8_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t 
v2, size_t vl) { + return vsuxseg3ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei8_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint8mf2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei8_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, 
vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei8_v_i8m1_m (vbool8_t mask, int8_t *base, vuint8m1_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_i8m2_m (vbool4_t mask, int8_t *base, vuint8m2_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i8m4_m (vbool2_t mask, int8_t *base, vuint8m4_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint16mf4_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint16mf2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei16_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint16m1_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei16_v_i8m1_m (vbool8_t mask, int8_t *base, vuint16m2_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei16_v_i8m2_m (vbool4_t mask, int8_t *base, vuint16m4_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_i8m4_m (vbool2_t mask, int8_t *base, vuint16m8_t bindex, vint8m4_t v0, vint8m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) {
+  return vsuxseg6ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) {
+  return vsuxseg7ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint32mf2_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) {
+  return vsuxseg8ei32_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei32_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint32m1_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei32_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint32m2_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// 
CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei32_v_i8m1_m (vbool8_t mask, int8_t *base, vuint32m4_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, size_t vl) { + return vsuxseg2ei32_v_i8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, size_t vl) { + return vsuxseg3ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_i8m2_m (vbool4_t mask, int8_t *base, vuint32m8_t bindex, vint8m2_t v0, vint8m2_t v1, vint8m2_t v2, vint8m2_t v3, size_t vl) { + return vsuxseg4ei32_v_i8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei64_v_i8mf8_m (vbool64_t mask, int8_t 
*base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, size_t vl) { + return vsuxseg3ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, size_t vl) { + return vsuxseg4ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, size_t vl) { + return vsuxseg5ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, size_t vl) { + return vsuxseg6ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, size_t vl) { + return vsuxseg7ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_i8mf8_m (vbool64_t mask, int8_t *base, vuint64m1_t bindex, vint8mf8_t v0, vint8mf8_t v1, vint8mf8_t v2, vint8mf8_t v3, vint8mf8_t v4, vint8mf8_t v5, vint8mf8_t v6, vint8mf8_t v7, size_t vl) { + return vsuxseg8ei64_v_i8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { - 
return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, size_t vl) { + return vsuxseg3ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, size_t vl) { + return vsuxseg4ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, size_t vl) { + return vsuxseg5ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, size_t vl) { + return vsuxseg6ei64_v_i8mf4_m(mask, base, 
bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, size_t vl) { + return vsuxseg7ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_i8mf4_m (vbool32_t mask, int8_t *base, vuint64m2_t bindex, vint8mf4_t v0, vint8mf4_t v1, vint8mf4_t v2, vint8mf4_t v3, vint8mf4_t v4, vint8mf4_t v5, vint8mf4_t v6, vint8mf4_t v7, size_t vl) { + return vsuxseg8ei64_v_i8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_i8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_i8mf2_m (vbool16_t mask, int8_t *base, vuint64m4_t bindex, vint8mf2_t v0, vint8mf2_t v1, vint8mf2_t v2, vint8mf2_t v3, vint8mf2_t v4, vint8mf2_t v5, vint8mf2_t v6, vint8mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_i8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t 
bindex, vint8m1_t v0, vint8m1_t v1, size_t vl) { + return vsuxseg2ei64_v_i8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, size_t vl) { + return vsuxseg3ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, size_t vl) { + return vsuxseg4ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, size_t vl) { + return vsuxseg5ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, size_t vl) { + return vsuxseg6ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, size_t vl) { + return vsuxseg7ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_i8m1_m (vbool8_t mask, int8_t *base, vuint64m8_t bindex, vint8m1_t v0, vint8m1_t v1, vint8m1_t v2, vint8m1_t v3, vint8m1_t v4, vint8m1_t v5, vint8m1_t v6, vint8m1_t v7, size_t vl) { + return vsuxseg8ei64_v_i8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t 
mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsuxseg2ei8_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsuxseg3ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsuxseg4ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsuxseg5ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsuxseg6ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsuxseg7ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint8mf8_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsuxseg8ei8_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_i32m1_m(mask, base, 
bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, 
v5, v6, v7, vl); +void test_vsuxseg8ei8_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint8mf4_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei8_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], 
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei8_v_i16m1_m (vbool16_t mask, int16_t *base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei8_v_i16m1_m (vbool16_t mask, int16_t 
*base, vuint8mf2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei8_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei8_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei8_v_i16m2_m (vbool8_t mask, int16_t *base, vuint8m1_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei8_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei8_v_i16m4_m (vbool4_t mask, int16_t *base, vuint8m2_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei8_v_i16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsuxseg2ei16_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsuxseg3ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void 
test_vsuxseg4ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsuxseg4ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsuxseg5ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsuxseg6ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t 
v5, vint16mf4_t v6, size_t vl) { + return vsuxseg7ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei16_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint16mf4_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsuxseg8ei16_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsuxseg2ei16_v_i16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsuxseg3ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsuxseg4ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei16_v_i16mf2_m 
(vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei16_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint16mf2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei16_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei16_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg4ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei16_v_i16m1_m (vbool16_t mask, int16_t *base, vuint16m1_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei16_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) { + return vsuxseg2ei16_v_i16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, 
vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) { + return vsuxseg3ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei16_v_i16m2_m (vbool8_t mask, int16_t *base, vuint16m2_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) { + return vsuxseg4ei16_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei16_v_i16m4_m (vbool4_t mask, int16_t *base, vuint16m4_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) { + return vsuxseg2ei16_v_i16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) { + return vsuxseg2ei32_v_i16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) { + return vsuxseg3ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) { + return vsuxseg4ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { - return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) { + return vsuxseg5ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { - return 
vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) { + return vsuxseg6ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) { + return vsuxseg7ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint32mf2_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) { + return vsuxseg8ei32_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_i16mf2_m(mask, base, 
bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) { + return vsuxseg3ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint32m1_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { - return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) { + return vsuxseg2ei32_v_i16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { - return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) { + return vsuxseg3ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { - return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) { + return vsuxseg4ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { - return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, 
v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) { + return vsuxseg5ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { - return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) { + return vsuxseg6ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { - return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { - return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei32_v_i16m1_m (vbool16_t mask, int16_t *base, vuint32m2_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m( 
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei32_v_i16m2_m (vbool8_t mask, int16_t *base, vuint32m4_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei32_v_i16m4_m (vbool4_t mask, int16_t *base, vuint32m8_t bindex, vint16m4_t v0, vint16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei64_v_i16mf4_m (vbool64_t mask, int16_t *base, vuint64m1_t bindex, vint16mf4_t v0, vint16mf4_t v1, vint16mf4_t v2, vint16mf4_t v3, vint16mf4_t v4, vint16mf4_t v5, vint16mf4_t v6, vint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei64_v_i16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg5ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg6ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg7ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg8ei64_v_i16mf2_m (vbool32_t mask, int16_t *base, vuint64m2_t bindex, vint16mf2_t v0, vint16mf2_t v1, vint16mf2_t v2, vint16mf2_t v3, vint16mf2_t v4, vint16mf2_t v5, vint16mf2_t v6, vint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_i16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg5ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei64_v_i16m1_m (vbool16_t mask, int16_t *base, vuint64m4_t bindex, vint16m1_t v0, vint16m1_t v1, vint16m1_t v2, vint16m1_t v3, vint16m1_t v4, vint16m1_t v5, vint16m1_t v6, vint16m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_i16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei64_v_i16m2_m (vbool8_t mask, int16_t *base, vuint64m8_t bindex, vint16m2_t v0, vint16m2_t v1, vint16m2_t v2, vint16m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+  return vsuxseg5ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+  return vsuxseg6ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+  return vsuxseg7ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei8_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint8mf8_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+  return vsuxseg8ei8_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei8_v_i32m1_m (vbool32_t mask, int32_t *base, vuint8mf4_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_i32m2_m (vbool16_t mask, int32_t *base, vuint8mf2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_i32m4_m (vbool8_t mask, int32_t *base, vuint8m1_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint16mf4_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_i32m1_m (vbool32_t mask, int32_t *base, vuint16mf2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_i32m2_m (vbool16_t mask, int32_t *base, vuint16m1_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_i32m4_m (vbool8_t mask, int32_t *base, vuint16m2_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei32_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint32mf2_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, 
vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsuxseg2ei32_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsuxseg3ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsuxseg4ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsuxseg5ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i32m1_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsuxseg6ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsuxseg7ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_i32m1_m (vbool32_t mask, int32_t *base, vuint32m1_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsuxseg8ei32_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void 
// -void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsuxseg2ei32_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsuxseg3ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_i32m2_m (vbool16_t mask, int32_t *base, vuint32m2_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsuxseg4ei32_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_i32m4_m (vbool8_t mask, int32_t *base, vuint32m4_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei32_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_i32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, 
size_t vl) { - return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei64_v_i32mf2_m (vbool64_t mask, int32_t *base, vuint64m1_t bindex, vint32mf2_t v0, vint32mf2_t v1, vint32mf2_t v2, vint32mf2_t v3, vint32mf2_t v4, vint32mf2_t v5, vint32mf2_t v6, vint32mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_i32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, size_t vl) { + return vsuxseg2ei64_v_i32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, size_t vl) { + return vsuxseg3ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, size_t vl) { + return vsuxseg4ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg3ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, size_t vl) { + return vsuxseg5ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, size_t vl) { + return vsuxseg6ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg7ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, size_t vl) { + return vsuxseg7ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg8ei64_v_i32m1_m (vbool32_t mask, int32_t *base, vuint64m2_t bindex, vint32m1_t v0, vint32m1_t v1, vint32m1_t v2, vint32m1_t v3, vint32m1_t v4, vint32m1_t v5, vint32m1_t v6, vint32m1_t v7, size_t vl) { + return vsuxseg8ei64_v_i32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, size_t vl) { + return vsuxseg2ei64_v_i32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, size_t vl) { + return vsuxseg3ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_i32m2_m (vbool16_t mask, int32_t *base, vuint64m4_t bindex, vint32m2_t v0, vint32m2_t v1, vint32m2_t v2, vint32m2_t v3, size_t vl) { + return vsuxseg4ei64_v_i32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei64_v_i32m4_m (vbool8_t mask, int32_t *base, vuint64m8_t bindex, vint32m4_t v0, vint32m4_t v1, size_t vl) { + return vsuxseg2ei64_v_i32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei8_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) { + return vsuxseg3ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) { + return vsuxseg4ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg5ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) { + return vsuxseg5ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg6ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) { + return vsuxseg6ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) { + return vsuxseg7ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei8_v_i64m1_m (vbool64_t mask, int64_t *base, vuint8mf8_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) { + return vsuxseg8ei8_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) { + return vsuxseg2ei8_v_i64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) { + return vsuxseg3ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg2ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { - return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_i64m2_m (vbool32_t mask, int64_t *base, vuint8mf4_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) { + return vsuxseg4ei8_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { - return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_i64m4_m (vbool16_t mask, int64_t *base, vuint8mf2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) { + return vsuxseg2ei8_v_i64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { - return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) { + return vsuxseg2ei16_v_i64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { - return vsuxseg5ei32_v_u8mf8_m(mask, 
base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg5ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg6ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei16_v_i64m1_m (vbool64_t mask, int64_t *base, vuint16mf4_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei16_v_i64m2_m (vbool32_t mask, int64_t *base, vuint16mf2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei16_v_i64m4_m (vbool16_t mask, int64_t *base, vuint16m1_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg5ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg6ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg7ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg8ei32_v_i64m1_m (vbool64_t mask, int64_t *base, vuint32mf2_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei32_v_i64m2_m (vbool32_t mask, int64_t *base, vuint32m1_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei32_v_i64m4_m (vbool16_t mask, int64_t *base, vuint32m2_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei64_v_i64m1_m (vbool64_t mask, int64_t *base, vuint64m1_t bindex, vint64m1_t v0, vint64m1_t v1, vint64m1_t v2, vint64m1_t v3, vint64m1_t v4, vint64m1_t v5, vint64m1_t v6, vint64m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_i64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_i64m2_m (vbool32_t mask, int64_t *base, vuint64m2_t bindex, vint64m2_t v0, vint64m2_t v1, vint64m2_t v2, vint64m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_i64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_i64m4_m (vbool16_t mask, int64_t *base, vuint64m4_t bindex, vint64m4_t v0, vint64m4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_i64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei8_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint8mf8_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei8_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint8mf4_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei8_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint8mf2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei8_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint8m1_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei8_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint8m2_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u8m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint8m4_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u8m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m(
+// CHECK-RV64-LABEL:
@test_vsuxseg6ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint16mf4_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsuxseg8ei16_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf4_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsuxseg2ei16_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsuxseg3ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsuxseg4ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, 
vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m2_m(mask, base, 
bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint16mf2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei16_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei16_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) { + return vsuxseg6ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) { + return vsuxseg7ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, 
vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei16_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint16m1_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei16_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei16_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei16_v_u8m1_m (vbool8_t mask, 
uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { - return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei16_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint16m2_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei16_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { - return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) { + return vsuxseg2ei16_v_u8m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { - return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) { + return vsuxseg3ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { - return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei16_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint16m4_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) { + return vsuxseg4ei16_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei16_v_u8m4_m (vbool2_t mask, uint8_t *base, vuint16m8_t bindex, vuint8m4_t v0, vuint8m4_t v1, size_t vl) { + return vsuxseg2ei16_v_u8m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) { + return vsuxseg2ei32_v_u8mf8_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) { + return vsuxseg3ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg4ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) { + return vsuxseg4ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) { + return vsuxseg5ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) { + return vsuxseg6ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], 
[[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) { + return vsuxseg7ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint32mf2_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) { + return vsuxseg8ei32_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) { + return vsuxseg2ei32_v_u8mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t 
*base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) { + return vsuxseg3ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) { + return vsuxseg4ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) { + return vsuxseg5ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { - return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) { + return vsuxseg6ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { - return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) { + return vsuxseg7ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { - return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei32_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint32m1_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) { + return vsuxseg8ei32_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { - return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_u8mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { - return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) { + return vsuxseg3ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { - return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { - return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t 
v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i8.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei32_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint32m2_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u8m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) {
+ return vsuxseg5ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) {
+ return vsuxseg6ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) {
+ return vsuxseg7ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u8m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei32_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint32m4_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) {
+ return vsuxseg8ei32_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_u8m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u8m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei32_v_u8m2_m (vbool4_t mask, uint8_t *base, vuint32m8_t bindex, vuint8m2_t v0, vuint8m2_t v1, vuint8m2_t v2, vuint8m2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_u8m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u8mf8_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, size_t vl) {
+ return vsuxseg5ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]],
[[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, size_t vl) {
+ return vsuxseg6ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, size_t vl) {
+ return vsuxseg7ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i8.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei64_v_u8mf8_m (vbool64_t mask, uint8_t *base, vuint64m1_t bindex, vuint8mf8_t v0, vuint8mf8_t v1, vuint8mf8_t v2, vuint8mf8_t v3, vuint8mf8_t v4, vuint8mf8_t v5, vuint8mf8_t v6, vuint8mf8_t v7, size_t vl) {
+ return vsuxseg8ei64_v_u8mf8_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
- return vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u8mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
- return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
- return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
- return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, size_t vl) {
+ return vsuxseg5ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, size_t vl) {
+ return vsuxseg6ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, size_t vl) {
+ return vsuxseg7ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i8.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei64_v_u8mf4_m (vbool32_t mask, uint8_t *base, vuint64m2_t bindex, vuint8mf4_t v0, vuint8mf4_t v1, vuint8mf4_t v2, vuint8mf4_t v3, vuint8mf4_t v4, vuint8mf4_t v5, vuint8mf4_t v6, vuint8mf4_t v7, size_t vl) {
+ return vsuxseg8ei64_v_u8mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_u8mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]],
vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_u8mf2_m (vbool16_t mask, uint8_t *base, vuint64m4_t bindex, vuint8mf2_t v0, vuint8mf2_t v1, vuint8mf2_t v2, vuint8mf2_t v3, vuint8mf2_t v4, vuint8mf2_t v5, vuint8mf2_t v6, vuint8mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_u8mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg2ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u8m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg3ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg4ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg8ei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg5ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg7ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, size_t vl) { + return vsuxseg7ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i8* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg8ei64_v_u8m1_m (vbool8_t mask, uint8_t *base, vuint64m8_t bindex, vuint8m1_t v0, vuint8m1_t v1, vuint8m1_t v2, vuint8m1_t v3, vuint8m1_t v4, vuint8m1_t v5, vuint8m1_t v6, vuint8m1_t v7, size_t vl) { + return vsuxseg8ei64_v_u8m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) { + return vsuxseg2ei8_v_u16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) { + return vsuxseg3ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, 
vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) { + return vsuxseg4ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) { + return vsuxseg5ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) { + return vsuxseg6ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) { + return 
vsuxseg7ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint8mf8_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) { + return vsuxseg8ei8_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { - return vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint8mf4_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsuxseg2ei8_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { - return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_u16m1_m (vbool16_t mask, uint16_t 
*base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsuxseg3ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { - return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsuxseg4ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { - return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsuxseg5ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { - return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsuxseg6ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei8_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint8mf2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei8_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint8m1_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei8_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint8m2_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg5ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg6ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg7ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg8ei16_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint16mf4_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei16_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint16mf2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei16_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint16m1_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei16_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint16m2_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei16_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint16m4_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei32_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint32mf2_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei32_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint32m1_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei32_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint32m2_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei32_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint32m4_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei32_v_u16m4_m (vbool4_t mask, uint16_t *base, vuint32m8_t bindex, vuint16m4_t v0, vuint16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg2ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_u16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg3ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, size_t vl) {
+  return vsuxseg3ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg4ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, size_t vl) {
+  return vsuxseg4ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg5ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, size_t vl) {
+  return vsuxseg5ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg6ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, size_t vl) {
+  return vsuxseg6ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg7ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, size_t vl) {
+  return vsuxseg7ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg8ei64_v_u16mf4_m (vbool64_t mask, uint16_t *base, vuint64m1_t bindex, vuint16mf4_t v0, vuint16mf4_t v1, vuint16mf4_t v2, vuint16mf4_t v3, vuint16mf4_t v4, vuint16mf4_t v5, vuint16mf4_t v6, vuint16mf4_t v7, size_t vl) {
+  return vsuxseg8ei64_v_u16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t
v1, size_t vl) { - return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, size_t vl) { + return vsuxseg2ei64_v_u16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, size_t vl) { + return vsuxseg3ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, size_t vl) { + return vsuxseg4ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg5ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, size_t vl) { + return vsuxseg5ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* 
[[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg6ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, size_t vl) { + return vsuxseg6ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg7ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, size_t vl) { + return vsuxseg7ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg8ei64_v_u16mf2_m (vbool32_t mask, uint16_t *base, vuint64m2_t bindex, vuint16mf2_t v0, vuint16mf2_t v1, vuint16mf2_t v2, vuint16mf2_t v3, vuint16mf2_t v4, vuint16mf2_t v5, vuint16mf2_t v6, vuint16mf2_t v7, size_t vl) { + return vsuxseg8ei64_v_u16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
[[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg2ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg3ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg4ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); 
+void test_vsuxseg5ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg6ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, size_t vl) { + return vsuxseg7ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4i16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg8ei64_v_u16m1_m (vbool16_t mask, uint16_t *base, vuint64m4_t bindex, vuint16m1_t v0, vuint16m1_t v1, vuint16m1_t v2, vuint16m1_t v3, vuint16m1_t v4, vuint16m1_t v5, vuint16m1_t v6, vuint16m1_t v7, size_t vl) { + return vsuxseg8ei64_v_u16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg2ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, size_t vl) { + return vsuxseg2ei64_v_u16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg3ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, size_t vl) { + return vsuxseg3ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i16* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg4ei64_v_u16m2_m (vbool8_t mask, uint16_t *base, vuint64m8_t bindex, vuint16m2_t v0, vuint16m2_t v1, vuint16m2_t v2, vuint16m2_t v3, size_t vl) { + return vsuxseg4ei64_v_u16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return 
vsuxseg5ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint8mf8_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], 
i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsuxseg2ei8_v_u32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsuxseg3ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsuxseg4ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, 
v4, v5, vl); +void test_vsuxseg5ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsuxseg5ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsuxseg6ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) { + return vsuxseg7ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint8mf4_t bindex, vuint32m1_t v0, 
vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) { + return vsuxseg8ei8_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) { + return vsuxseg2ei8_v_u32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) { + return vsuxseg3ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint8mf2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) { + return vsuxseg4ei8_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint8m1_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) { + return vsuxseg2ei8_v_u32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) { + return vsuxseg2ei16_v_u32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) { + return vsuxseg3ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) { + return vsuxseg4ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32mf2_m( // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) { + return vsuxseg5ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) { + return vsuxseg6ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) { + return vsuxseg7ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint16mf4_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) { + return vsuxseg8ei16_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { - return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) { + return vsuxseg2ei16_v_u32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) { + return vsuxseg3ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f32m1_m (vbool32_t 
mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) { + return vsuxseg4ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) { + return vsuxseg5ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) { + return vsuxseg6ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, 
vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint16mf2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei16_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint16m1_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint16m2_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint32mf2_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint32m1_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint32m2_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint32m4_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_u32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_u32mf2_m (vbool64_t mask, uint32_t *base, vuint64m1_t bindex, vuint32mf2_t v0, vuint32mf2_t v1, vuint32mf2_t v2, vuint32mf2_t v3, vuint32mf2_t v4, vuint32mf2_t v5, vuint32mf2_t v6, vuint32mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_u32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_u32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2i32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_u32m1_m (vbool32_t mask, uint32_t *base, vuint64m2_t bindex, vuint32m1_t v0, vuint32m1_t v1, vuint32m1_t v2, vuint32m1_t v3, vuint32m1_t v4, vuint32m1_t v5, vuint32m1_t v6, vuint32m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_u32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_u32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4i32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_u32m2_m (vbool16_t mask, uint32_t *base, vuint64m4_t bindex, vuint32m2_t v0, vuint32m2_t v1, vuint32m2_t v2, vuint32m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_u32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], i32* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg2ei64_v_u32m4_m (vbool8_t mask, uint32_t *base, vuint64m8_t bindex, vuint32m4_t v0, vuint32m4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_u32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
-  return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
-  return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+  return vsuxseg5ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
-  return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
+  return vsuxseg6ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
-  return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+  return vsuxseg7ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei8_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint8mf8_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+  return vsuxseg8ei8_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
-  return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei8_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint8mf4_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+  return vsuxseg4ei8_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
-  return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei8_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint8mf2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+  return vsuxseg2ei8_v_u64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
-  return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
-  return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint16mf4_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_u64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint16mf2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint16m1_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_u64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_u64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_u64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]],
[[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint32mf2_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei32_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m1_m 
(vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei32_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint32m1_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei32_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei32_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint32m2_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei32_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m( +// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, size_t vl) { + return vsuxseg2ei64_v_u64m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, size_t vl) { + return vsuxseg3ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, size_t vl) { + return vsuxseg4ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], i64* [[BASE:%.*]], 
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, size_t vl) { + return vsuxseg5ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, size_t vl) { + return vsuxseg6ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, size_t vl) { + return vsuxseg7ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1i64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei64_v_u64m1_m (vbool64_t mask, uint64_t *base, vuint64m1_t 
bindex, vuint64m1_t v0, vuint64m1_t v1, vuint64m1_t v2, vuint64m1_t v3, vuint64m1_t v4, vuint64m1_t v5, vuint64m1_t v6, vuint64m1_t v7, size_t vl) { + return vsuxseg8ei64_v_u64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, size_t vl) { + return vsuxseg2ei64_v_u64m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, size_t vl) { + return vsuxseg3ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2i64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei64_v_u64m2_m (vbool32_t mask, uint64_t *base, vuint64m2_t bindex, vuint64m2_t v0, vuint64m2_t v1, vuint64m2_t v2, vuint64m2_t v3, size_t vl) { + return vsuxseg4ei64_v_u64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], i64* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei64_v_u64m4_m (vbool16_t mask, uint64_t *base, vuint64m4_t bindex, vuint64m4_t v0, vuint64m4_t v1, size_t vl) { + return vsuxseg2ei64_v_u64m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, 
size_t vl) { - return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { + return vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { + return vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg2ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { + return vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { + return vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { + return vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, 
double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { + return vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { + return 
vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) { - return vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { + return vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf4(base, bindex, v0, v1, vl); +void test_vsuxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { + return vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf4(base, bindex, v0, v1, v2, vl); +void test_vsuxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { + return vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { + return vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { + return vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { + return vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, 
vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { + return vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { + return vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { + return vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { + return vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { + return vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { + return vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { + return vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, 
vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsuxseg5ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsuxseg6ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsuxseg7ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsuxseg8ei8_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsuxseg3ei8_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsuxseg4ei8_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsuxseg2ei8_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsuxseg5ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsuxseg6ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsuxseg7ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsuxseg8ei16_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsuxseg3ei16_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsuxseg4ei16_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsuxseg2ei16_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
+  return vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16m1(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16m1(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16m1(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
-  return vsuxseg5ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
-  return vsuxseg6ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
-  return vsuxseg7ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
-  return vsuxseg8ei32_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16m2(base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
-  return vsuxseg3ei32_v_f16m2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
-  return vsuxseg4ei32_v_f16m2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
-  return vsuxseg2ei32_v_f16m4(base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f16mf4(base, bindex, v0, v1, vl);
+void test_vsuxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
+  return vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f16mf4(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
+  return vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
+  return vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg5.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg5ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
-  return vsuxseg5ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
+  return vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg6.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg6ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
-  return vsuxseg6ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
+  return vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg7.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg7ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
-  return vsuxseg7ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
+  return vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg8.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg8ei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
-  return vsuxseg8ei64_v_f16mf4(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
+  return vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg2ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
-  return vsuxseg2ei64_v_f16mf2(base, bindex, v0, v1, vl);
+void test_vsuxseg2ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+  return vsuxseg2ei8_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg3ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
-  return vsuxseg3ei64_v_f16mf2(base, bindex, v0, v1, v2, vl);
+void test_vsuxseg3ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+  return vsuxseg3ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret void
 //
-void test_vsuxseg4ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
-  return vsuxseg4ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg4ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0,
vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: 
@test_vsuxseg8ei64_v_f16mf2( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei64_v_f16mf2(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32mf2_m (vbool64_t mask, float *base, vuint8mf8_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei8_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m1(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei8_v_f32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m1(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void 
test_vsuxseg4ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m1(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg5ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg6ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg7ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t 
bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1( +// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei64_v_f16m1(base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg8ei8_v_f32m1_m (vbool32_t mask, float *base, vuint8mf4_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei8_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei64_v_f16m2(base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei8_v_f32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei64_v_f16m2(base, bindex, v0, v1, v2, vl); +void test_vsuxseg3ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2( +// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call 
void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei64_v_f16m2(base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg4ei8_v_f32m2_m (vbool16_t mask, float *base, vuint8mf2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei8_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg2ei8_v_f32m4_m (vbool8_t mask, float *base, vuint8m1_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei8_v_f32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei16_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf4_m( +// 
CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16mf2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f32mf2_m (vbool64_t mask, float *base, vuint16mf4_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei16_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei16_v_f32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) { - return vsuxseg5ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) { - return vsuxseg6ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg5ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) { + return vsuxseg5ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, 
vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) { - return vsuxseg7ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg6ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) { + return vsuxseg6ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16mf2_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) { - return vsuxseg8ei8_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg7ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) { + return vsuxseg7ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m1_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei16_v_f32m1_m (vbool32_t mask, float *base, vuint16mf2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) { + return vsuxseg8ei16_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) { - return 
vsuxseg3ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) { + return vsuxseg2ei16_v_f32m2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) { + return vsuxseg3ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) { - return vsuxseg5ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei16_v_f32m2_m (vbool16_t mask, float *base, vuint16m1_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) { + return vsuxseg4ei16_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg6ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) { - return vsuxseg6ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); +void test_vsuxseg2ei16_v_f32m4_m (vbool8_t mask, float *base, vuint16m2_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) { + return vsuxseg2ei16_v_f32m4_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], 
[[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg7ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) { - return vsuxseg7ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); +void test_vsuxseg2ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) { + return vsuxseg2ei32_v_f32mf2_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f16m1_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg8ei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) { - return vsuxseg8ei8_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); +void test_vsuxseg3ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) { + return vsuxseg3ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m2_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg4ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) { + return vsuxseg4ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, 
vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) { - return vsuxseg3ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg5ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) { + return vsuxseg5ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f16m2_m( +// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) { - return vsuxseg4ei8_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg6ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) { + return vsuxseg6ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f16m4_m( +// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) { - return vsuxseg2ei8_v_f16m4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg7ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) { + return vsuxseg7ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg2ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) { - return vsuxseg2ei16_v_f16mf4_m(mask, base, bindex, v0, v1, vl); +void test_vsuxseg8ei32_v_f32mf2_m (vbool64_t mask, float *base, vuint32mf2_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, 
vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) { + return vsuxseg8ei32_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg3ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) { - return vsuxseg3ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl); +void test_vsuxseg2ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) { + return vsuxseg2ei32_v_f32m1_m(mask, base, bindex, v0, v1, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg4ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) { - return vsuxseg4ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl); +void test_vsuxseg3ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) { + return vsuxseg3ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret void // -void test_vsuxseg5ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) { - return vsuxseg5ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl); +void test_vsuxseg4ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) { + return vsuxseg4ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl); } -// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf4_m( +// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg5ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+ return vsuxseg5ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg6ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, size_t vl) {
+ return vsuxseg6ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg7ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+ return vsuxseg7ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei32_v_f32m1_m (vbool32_t mask, float *base, vuint32m1_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+ return vsuxseg8ei32_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei32_v_f32m2_m (vbool16_t mask, float *base, vuint32m2_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei32_v_f32m4_m (vbool8_t mask, float *base, vuint32m4_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f32mf2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: 
@test_vsuxseg2ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, size_t vl) {
+ return vsuxseg5ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, size_t vl) {
+ return vsuxseg6ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, size_t vl) {
+ return vsuxseg7ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f32.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei64_v_f32mf2_m (vbool64_t mask, float *base, vuint64m1_t bindex, vfloat32mf2_t v0, vfloat32mf2_t v1, vfloat32mf2_t v2, vfloat32mf2_t v3, vfloat32mf2_t v4, vfloat32mf2_t v5, vfloat32mf2_t v6, vfloat32mf2_t v7, size_t vl) {
+ return vsuxseg8ei64_v_f32mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f32m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei16_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei16_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t 
v4, vfloat32m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei16_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f32.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg8ei64_v_f32m1_m (vbool32_t mask, float *base, vuint64m2_t bindex, vfloat32m1_t v0, vfloat32m1_t v1, vfloat32m1_t v2, vfloat32m1_t v3, vfloat32m1_t v4, vfloat32m1_t v5, vfloat32m1_t v6, vfloat32m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_f32m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f32m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg3ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f32.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg4ei64_v_f32m2_m (vbool16_t mask, float *base, vuint64m4_t bindex, vfloat32m2_t v0, vfloat32m2_t v1, vfloat32m2_t v2, vfloat32m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f32m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], float* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg2ei64_v_f32m4_m (vbool8_t mask, float *base, vuint64m8_t bindex, vfloat32m4_t v0, vfloat32m4_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f32m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg5ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+ return vsuxseg5ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg6ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+ return vsuxseg6ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg7ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+ return vsuxseg7ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg8ei8_v_f64m1_m (vbool64_t mask, double *base, vuint8mf8_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+ return vsuxseg8ei8_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei8_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg3ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+ return vsuxseg3ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei8_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i8.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei8_v_f64m2_m (vbool32_t mask, double *base, vuint8mf4_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+ return vsuxseg4ei8_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei8_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei8_v_f64m4_m (vbool16_t mask, double *base, vuint8mf2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+ return vsuxseg2ei8_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: 
@test_vsuxseg7ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg5ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+ return vsuxseg5ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei32_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg6ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+ return vsuxseg6ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+ return vsuxseg7ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei16_v_f64m1_m (vbool64_t mask, double *base, vuint16mf4_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+ return vsuxseg8ei16_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei32_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f16m4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei16_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t v0, vfloat16m4_t v1, size_t vl) {
- return vsuxseg2ei32_v_f16m4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+ return vsuxseg3ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei16_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i16.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, 
vfloat16mf4_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16mf4_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg4ei16_v_f64m2_m (vbool32_t mask, double *base, vuint16mf2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+ return vsuxseg4ei16_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei16_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg2ei16_v_f64m4_m (vbool16_t mask, double *base, vuint16m1_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+ return vsuxseg2ei16_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg5ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+ return vsuxseg5ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf4_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f16.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t v0, vfloat16mf4_t v1, vfloat16mf4_t v2, vfloat16mf4_t v3, vfloat16mf4_t v4, vfloat16mf4_t v5, vfloat16mf4_t v6, vfloat16mf4_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16mf4_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg6ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+ return vsuxseg6ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16mf2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg7ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+ return vsuxseg7ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg8ei32_v_f64m1_m (vbool64_t mask, double *base, vuint32mf2_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+ return vsuxseg8ei32_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei32_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg3ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+ return vsuxseg3ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei32_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i32.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg4ei32_v_f64m2_m (vbool32_t mask, double *base, vuint32m1_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+ return vsuxseg4ei32_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei32_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg2ei32_v_f64m4_m (vbool16_t mask, double *base, vuint32m2_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+ return vsuxseg2ei32_v_f64m4_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16mf2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv2f16.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t v0, vfloat16mf2_t v1, vfloat16mf2_t v2, vfloat16mf2_t v3, vfloat16mf2_t v4, vfloat16mf2_t v5, vfloat16mf2_t v6, vfloat16mf2_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16mf2_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t 
bindex, vfloat64m1_t v0, vfloat64m1_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f64m1_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m1_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg5ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, size_t vl) {
+ return vsuxseg5ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg5ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg5ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, size_t vl) {
- return vsuxseg5ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, vl);
+void test_vsuxseg6ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, size_t vl) {
+ return vsuxseg6ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg6ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg6ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, size_t vl) {
- return vsuxseg6ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, vl);
+void test_vsuxseg7ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, size_t vl) {
+ return vsuxseg7ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg7ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv1f64.nxv1i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg7ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, size_t vl) {
- return vsuxseg7ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, vl);
+void test_vsuxseg8ei64_v_f64m1_m (vbool64_t mask, double *base, vuint64m1_t bindex, vfloat64m1_t v0, vfloat64m1_t v1, vfloat64m1_t v2, vfloat64m1_t v3, vfloat64m1_t v4, vfloat64m1_t v5, vfloat64m1_t v6, vfloat64m1_t v7, size_t vl) {
+ return vsuxseg8ei64_v_f64m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg8ei64_v_f16m1_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.nxv4f16.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], [[V4:%.*]], [[V5:%.*]], [[V6:%.*]], [[V7:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg8ei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t v0, vfloat16m1_t v1, vfloat16m1_t v2, vfloat16m1_t v3, vfloat16m1_t v4, vfloat16m1_t v5, vfloat16m1_t v6, vfloat16m1_t v7, size_t vl) {
- return vsuxseg8ei64_v_f16m1_m(mask, base, bindex, v0, v1, v2, v3, v4, v5, v6, v7, vl);
+void test_vsuxseg2ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, size_t vl) {
+ return vsuxseg2ei64_v_f64m2_m(mask, base, bindex, v0, v1, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg2ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, size_t vl) {
- return vsuxseg2ei64_v_f16m2_m(mask, base, bindex, v0, v1, vl);
+void test_vsuxseg3ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, size_t vl) {
+ return vsuxseg3ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg3ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv2f64.nxv2i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg3ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, size_t vl) {
- return vsuxseg3ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, vl);
+void test_vsuxseg4ei64_v_f64m2_m (vbool32_t mask, double *base, vuint64m2_t bindex, vfloat64m2_t v0, vfloat64m2_t v1, vfloat64m2_t v2, vfloat64m2_t v3, size_t vl) {
+ return vsuxseg4ei64_v_f64m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
 }
-// CHECK-RV64-LABEL: @test_vsuxseg4ei64_v_f16m2_m(
+// CHECK-RV64-LABEL: @test_vsuxseg2ei64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64.i64( [[V0:%.*]], [[V1:%.*]], [[V2:%.*]], [[V3:%.*]], half* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64.i64( [[V0:%.*]], [[V1:%.*]], double* [[BASE:%.*]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret void
 //
-void test_vsuxseg4ei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t v0, vfloat16m2_t v1, vfloat16m2_t v2, vfloat16m2_t v3, size_t vl) {
- return vsuxseg4ei64_v_f16m2_m(mask, base, bindex, v0, v1, v2, v3, vl);
+void test_vsuxseg2ei64_v_f64m4_m (vbool16_t mask, double *base, vuint64m4_t bindex, vfloat64m4_t v0, vfloat64m4_t v1, size_t vl) {
+ return 
vsuxseg2ei64_v_f64m4_m(mask, base, bindex, v0, v1, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c index db9f710..e6f46ce 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwadd.c @@ -1088,1234 +1088,2167 @@ vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8 (vuint64m8_t op1, uint32_t op2, size_t vl) { return vwaddu_wx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint8mf8_t op1, int8_t op2, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, vint8mf8_t op2, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) { return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, - vint16mf4_t op1, int8_t op2, size_t 
vl) { +vint16mf4_t test_vwadd_wx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) { return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwadd_wx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwadd_wx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { return vwadd_wx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwadd_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
- vint16m8_t op1, int8_t op2, size_t vl) {
+vint16m8_t test_vwadd_wx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
   return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint16mf4_t op1, vint16mf4_t op2,
- size_t vl) {
+vint32mf2_t test_vwadd_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint16mf4_t op1, int16_t op2, size_t vl) {
+vint32mf2_t test_vwadd_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint32mf2_t op1, vint16mf4_t op2,
- size_t vl) {
+vint32mf2_t test_vwadd_wv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
   return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
- vint32mf2_t op1, int16_t op2, size_t vl) {
+vint32mf2_t test_vwadd_wx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+vint32m1_t test_vwadd_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint16mf2_t op1, int16_t op2, size_t vl) {
+vint32m1_t test_vwadd_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint32m1_t op1, vint16mf2_t op2, size_t vl) {
+vint32m1_t test_vwadd_wv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
   return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
- vint32m1_t op1, int16_t op2, size_t vl) {
+vint32m1_t test_vwadd_wx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint16m1_t op1, vint16m1_t op2, size_t vl) {
+vint32m2_t test_vwadd_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint16m1_t op1, int16_t op2, size_t vl) {
+vint32m2_t test_vwadd_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint32m2_t op1, vint16m1_t op2, size_t vl) {
+vint32m2_t test_vwadd_wv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
   return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
- vint32m2_t op1, int16_t op2, size_t vl) {
+vint32m2_t test_vwadd_wx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint16m2_t op1, vint16m2_t op2, size_t vl) {
+vint32m4_t test_vwadd_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint16m2_t op1, int16_t op2, size_t vl) {
+vint32m4_t test_vwadd_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint32m4_t op1, vint16m2_t op2, size_t vl) {
+vint32m4_t test_vwadd_wv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
   return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
- vint32m4_t op1, int16_t op2, size_t vl) {
+vint32m4_t test_vwadd_wx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
- vint16m4_t op1, vint16m4_t op2, size_t vl) {
+vint32m8_t test_vwadd_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
- vint16m4_t op1, int16_t op2, size_t vl) {
+vint32m8_t test_vwadd_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
   return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
- vint32m8_t op1, vint16m4_t op2, size_t vl) {
+vint32m8_t test_vwadd_wv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
   return vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
- vint32m8_t op1, int16_t op2, size_t vl) {
+vint32m8_t test_vwadd_wx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
   return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
- vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+vint64m1_t test_vwadd_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
- vint32mf2_t op1, int32_t op2, size_t vl) {
+vint64m1_t test_vwadd_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
- vint64m1_t op1, vint32mf2_t op2, size_t vl) {
+vint64m1_t test_vwadd_wv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
   return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
- vint64m1_t op1, int32_t op2, size_t vl) {
+vint64m1_t test_vwadd_wx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
   return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vint32m1_t op1, vint32m1_t op2, size_t vl) {
+vint64m2_t test_vwadd_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vint32m1_t op1, int32_t op2, size_t vl) {
+vint64m2_t test_vwadd_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
   return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vint64m2_t op1, vint32m1_t op2, size_t vl) {
+vint64m2_t test_vwadd_wv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
   return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
- vint64m2_t op1, int32_t op2, size_t vl) {
+vint64m2_t test_vwadd_wx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, 
vint32m4_t op2, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) { return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint16mf2_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) { return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint16m1_t op1, vuint8mf2_t op2, - size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, 
 return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint16m1_t op1, uint8_t op2, size_t vl) {
+vuint16m1_t test_vwaddu_wx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
 return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint16m2_t test_vwaddu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
 return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint16m2_t test_vwaddu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
 return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+vuint16m2_t test_vwaddu_wv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
 return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint16m2_t op1, uint8_t op2, size_t vl) {
+vuint16m2_t test_vwaddu_wx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
 return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint16m4_t test_vwaddu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
 return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint16m4_t test_vwaddu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
 return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+vuint16m4_t test_vwaddu_wv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
 return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint16m4_t op1, uint8_t op2, size_t vl) {
+vuint16m4_t test_vwaddu_wx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
 return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint16m8_t test_vwaddu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
 return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint16m8_t test_vwaddu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
 return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+vuint16m8_t test_vwaddu_wv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
 return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint16m8_t op1, uint8_t op2, size_t vl) {
+vuint16m8_t test_vwaddu_wx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
 return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint16mf4_t op1, vuint16mf4_t op2,
- size_t vl) {
+vuint32mf2_t test_vwaddu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
 return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint16mf4_t op1, uint16_t op2,
- size_t vl) {
+vuint32mf2_t test_vwaddu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
 return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint32mf2_t op1, vuint16mf4_t op2,
- size_t vl) {
+vuint32mf2_t test_vwaddu_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
 return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint32mf2_t op1, uint16_t op2,
- size_t vl) {
+vuint32mf2_t test_vwaddu_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
 return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint16mf2_t op1, vuint16mf2_t op2,
- size_t vl) {
+vuint32m1_t test_vwaddu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
 return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint32m1_t test_vwaddu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
 return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint32m1_t op1, vuint16mf2_t op2,
- size_t vl) {
+vuint32m1_t test_vwaddu_wv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
 return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint32m1_t op1, uint16_t op2, size_t vl) {
+vuint32m1_t test_vwaddu_wx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
 return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint16m1_t op1, vuint16m1_t op2,
- size_t vl) {
+vuint32m2_t test_vwaddu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
 return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint32m2_t test_vwaddu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
 return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint32m2_t op1, vuint16m1_t op2,
- size_t vl) {
+vuint32m2_t test_vwaddu_wv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
 return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint32m2_t op1, uint16_t op2, size_t vl) {
+vuint32m2_t test_vwaddu_wx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
 return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint16m2_t op1, vuint16m2_t op2,
- size_t vl) {
+vuint32m4_t test_vwaddu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
 return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint32m4_t test_vwaddu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
 return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint32m4_t op1, vuint16m2_t op2,
- size_t vl) {
+vuint32m4_t test_vwaddu_wv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
 return vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint32m4_t op1, uint16_t op2, size_t vl) {
+vuint32m4_t test_vwaddu_wx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
 return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint16m4_t op1, vuint16m4_t op2,
- size_t vl) {
+vuint32m8_t test_vwaddu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
 return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint32m8_t test_vwaddu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
 return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint32m8_t op1, vuint16m4_t op2,
- size_t vl) {
+vuint32m8_t test_vwaddu_wv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
 return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint32m8_t op1, uint16_t op2, size_t vl) {
+vuint32m8_t test_vwaddu_wx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
 return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint32mf2_t op1, vuint32mf2_t op2,
- size_t vl) {
+vuint64m1_t test_vwaddu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
 return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint64m1_t test_vwaddu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
 return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint64m1_t op1, vuint32mf2_t op2,
- size_t vl) {
+vuint64m1_t test_vwaddu_wv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
 return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint64m1_t op1, uint32_t op2, size_t vl) {
+vuint64m1_t test_vwaddu_wx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
 return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint32m1_t op1, vuint32m1_t op2,
- size_t vl) {
+vuint64m2_t test_vwaddu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
 return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint64m2_t test_vwaddu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
 return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint64m2_t op1, vuint32m1_t op2,
- size_t vl) {
+vuint64m2_t test_vwaddu_wv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
 return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint64m2_t op1, uint32_t op2, size_t vl) {
+vuint64m2_t test_vwaddu_wx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
 return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint32m2_t op1, vuint32m2_t op2,
- size_t vl) {
+vuint64m4_t test_vwaddu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
 return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint64m4_t test_vwaddu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
 return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t op1, vuint32m2_t op2,
- size_t vl) {
+vuint64m4_t test_vwaddu_wv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
 return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t op1, uint32_t op2, size_t vl) {
+vuint64m4_t test_vwaddu_wx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
 return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint32m4_t op1, vuint32m4_t op2,
- size_t vl) {
+vuint64m8_t test_vwaddu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
 return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwaddu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
 return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t op1, vuint32m4_t op2,
- size_t vl) {
+vuint64m8_t test_vwaddu_wv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwaddu_wx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
 return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwadd_wx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_wv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwadd_wx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_wv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwadd_wx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_wv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwadd_wx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_wv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwadd_wx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_wv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwadd_wx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_wv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwadd_wx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwadd_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwadd_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwadd_wv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwadd_wx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwadd_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwadd_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwadd_wv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwadd_wx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_wv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwadd_wx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_wv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwadd_wx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vwadd_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwadd_wv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vwadd_wv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwadd_wx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_wx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vwadd_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vwadd_wv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwadd_wx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_wx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vwadd_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vwadd_wv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwadd_wx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_wx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vwadd_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vwadd_wv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwadd_wx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwadd_wx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwaddu_wx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwaddu_wx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwaddu_wx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwaddu_wx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwaddu_wx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwaddu_wx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwaddu_wx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwaddu_wx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwaddu_wx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwaddu_wx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwaddu_wx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwaddu_wx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwaddu_wx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vwaddu_wv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwaddu_wx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwaddu_wx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwaddu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vwaddu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwaddu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vwaddu_wv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwaddu_wx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwaddu_wx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
index 07e79e9..c920c18 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
@@ -276,7 +276,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
@@ -285,7 +285,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
@@ -294,7 +294,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
@@ -303,7 +303,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
 //
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) { @@ -312,7 +312,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) { @@ -321,7 +321,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) { @@ -330,7 +330,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { @@ -339,7 +339,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { @@ -348,7 +348,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: 
ret [[TMP0]] // vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) { @@ -357,7 +357,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vu // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) { @@ -366,7 +366,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vui // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) { @@ -375,7 +375,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vui // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) { @@ -384,7 +384,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vui // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -393,7 +393,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) { @@ -402,7 +402,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint1 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) { @@ -411,7 +411,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint1 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) { @@ -420,7 +420,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) { @@ -429,7 +429,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -438,7 +438,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) { @@ -447,7 +447,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vu // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m2_t 
test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) { @@ -456,7 +456,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vu // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) { @@ -465,7 +465,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vui // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) { @@ -474,7 +474,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vui // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) { @@ -483,7 +483,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint3 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) { @@ -492,7 +492,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint3 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 0, [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) { @@ -501,7 +501,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint3 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
@@ -510,7 +510,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
@@ -537,10 +537,280 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwcvt_x_x_v_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+//
CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwcvt_x_x_v_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwcvt_x_x_v_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwcvt_x_x_v_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwcvt_x_x_v_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwcvt_x_x_v_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16mf4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwcvtu_x_x_v_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwcvtu_x_x_v_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwcvtu_x_x_v_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwcvtu_x_x_v_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u16m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwcvt_x_x_v_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwcvt_x_x_v_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwcvt_x_x_v_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwcvt_x_x_v_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u32mf2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwcvtu_x_x_v_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u32m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwcvtu_x_x_v_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u32m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwcvtu_x_x_v_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u32m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwcvtu_x_x_v_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u32m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vwcvt_x_x_v_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vwcvt_x_x_v_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vwcvt_x_x_v_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl, uint8_t ta) {
+  return vwcvt_x_x_v_i64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u64m1_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vwcvtu_x_x_v_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u64m2_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vwcvtu_x_x_v_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u64m4_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_mt(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vwcvtu_x_x_v_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl, uint8_t ta) {
+  return vwcvtu_x_x_v_u64m8_mt(mask, maskedoff, src, vl, VE_TAIL_AGNOSTIC);
+}
+
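The _mt tests above all exercise the same contract: the builtin takes the tail policy as a trailing argument, and the lowered IR carries it as the final i64 operand of the masked intrinsic (the tests pass VE_TAIL_AGNOSTIC, which lowers to i64 1). A minimal usage sketch in C follows; it is not part of the patch, the helper name is invented, and it assumes the policy encoding implied by these tests (1 requests tail agnostic, 0 tail undisturbed).

#include <riscv_vector.h>

// Hypothetical example, not from the patch: widen i8 -> i16 under a mask,
// requesting tail-undisturbed so elements past vl keep maskedoff's values.
// Assumes policy value 0 means tail undisturbed, the counterpart of the
// VE_TAIL_AGNOSTIC (== 1) passed throughout the tests above.
vint16m1_t widen_keep_tail(vbool16_t mask, vint16m1_t maskedoff,
                           vint8mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i16m1_mt(mask, maskedoff, src, vl, 0);
}
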
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
index 181125f..8d8a12ff 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmul.c
@@ -810,816 +810,1627 @@ vint64m8_t test_vwmulsu_vv_i64m8(vint32m4_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmulsu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vwmulsu_vx_i64m8(vint32m4_t op1, uint32_t op2, size_t vl) {
+vint64m8_t test_vwmulsu_vx_i64m8 (vint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m8(op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vwmul_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint16mf4_t test_vwmul_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwmul_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vwmul_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
+vint16mf4_t test_vwmul_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vwmul_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+vint16mf2_t test_vwmul_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
   return vwmul_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vwmul_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
+vint16mf2_t test_vwmul_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
   return vwmul_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_m(
 // CHECK-RV64-NEXT:
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwmul_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vwmul_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwmul_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vwmul_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwmul_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vwmul_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwmul_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vwmul_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwmul_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return 
vwmul_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwmul_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vwmul_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwmul_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vwmul_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwmul_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vwmul_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint32mf2_t test_vwmul_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vwmul_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwmul_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, 
int16_t op2, size_t vl) { +vint32mf2_t test_vwmul_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vwmul_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwmul_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vwmul_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwmul_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vwmul_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwmul_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vwmul_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwmul_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vwmul_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwmul_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vwmul_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwmul_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vwmul_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwmul_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vwmul_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwmul_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vwmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwmul_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vwmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwmul_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vwmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwmul_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vwmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwmul_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vwmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwmul_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vwmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwmul_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vwmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_m( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwmul_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vwmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwmul_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vwmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { +vuint16mf4_t test_vwmulu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vwmulu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwmulu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwmulu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { +vuint16mf2_t 
test_vwmulu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vwmulu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwmulu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwmulu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { +vuint16m1_t test_vwmulu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) { return vwmulu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwmulu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { +vuint16m1_t test_vwmulu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { +vuint16m2_t test_vwmulu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { return vwmulu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vwmulu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { +vuint16m2_t test_vwmulu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { +vuint16m4_t test_vwmulu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { return vwmulu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vwmulu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { +vuint16m4_t test_vwmulu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { +vuint16m8_t test_vwmulu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { return vwmulu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vwmulu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { +vuint16m8_t test_vwmulu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { return vwmulu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwmulu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { +vuint32mf2_t test_vwmulu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) { return vwmulu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vwmulu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { +vuint32mf2_t test_vwmulu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) { return vwmulu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { +vuint32m1_t test_vwmulu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) { return vwmulu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwmulu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { +vuint32m1_t test_vwmulu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) { return vwmulu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { +vuint32m2_t test_vwmulu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t 
vl) { return vwmulu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vwmulu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { +vuint32m2_t test_vwmulu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { return vwmulu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { +vuint32m4_t test_vwmulu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { return vwmulu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vwmulu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { +vuint32m4_t test_vwmulu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { return vwmulu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vwmulu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { +vuint32m8_t test_vwmulu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { return vwmulu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vuint32m8_t test_vwmulu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { +vuint32m8_t test_vwmulu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { return vwmulu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { +vuint64m1_t test_vwmulu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) { return vwmulu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwmulu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { +vuint64m1_t test_vwmulu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) { return vwmulu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { +vuint64m2_t test_vwmulu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { return vwmulu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vwmulu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { +vuint64m2_t test_vwmulu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { return vwmulu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vwmulu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint64m4_t test_vwmulu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m4_t test_vwmulu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint64m4_t test_vwmulu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vwmulu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint64m8_t test_vwmulu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vwmulu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwmulu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwmulsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vint16mf4_t test_vwmulsu_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
   return vwmulsu_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwmulsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
+vint16mf4_t test_vwmulsu_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf2_t test_vwmulsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vint16mf2_t test_vwmulsu_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
   return vwmulsu_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf2_t test_vwmulsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
+vint16mf2_t test_vwmulsu_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m1_t test_vwmulsu_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vint16m1_t test_vwmulsu_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
   return vwmulsu_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m1_t test_vwmulsu_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
+vint16m1_t test_vwmulsu_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m2_t test_vwmulsu_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vint16m2_t test_vwmulsu_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl) {
   return vwmulsu_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m2_t test_vwmulsu_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
+vint16m2_t test_vwmulsu_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m4_t test_vwmulsu_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vint16m4_t test_vwmulsu_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl) {
   return vwmulsu_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m4_t test_vwmulsu_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
+vint16m4_t test_vwmulsu_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m8_t test_vwmulsu_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vint16m8_t test_vwmulsu_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl) {
   return vwmulsu_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16m8_t test_vwmulsu_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
+vint16m8_t test_vwmulsu_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl) {
   return vwmulsu_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwmulsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vint32mf2_t test_vwmulsu_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
   return vwmulsu_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32mf2_t test_vwmulsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
+vint32mf2_t test_vwmulsu_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwmulsu_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vint32m1_t test_vwmulsu_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
   return vwmulsu_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m1_t test_vwmulsu_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
+vint32m1_t test_vwmulsu_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwmulsu_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vint32m2_t test_vwmulsu_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl) {
   return vwmulsu_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m2_t test_vwmulsu_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
+vint32m2_t test_vwmulsu_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwmulsu_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vint32m4_t test_vwmulsu_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl) {
   return vwmulsu_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m4_t test_vwmulsu_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
+vint32m4_t test_vwmulsu_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwmulsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vint32m8_t test_vwmulsu_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl) {
   return vwmulsu_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint32m8_t test_vwmulsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
+vint32m8_t test_vwmulsu_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl) {
   return vwmulsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwmulsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vint64m1_t test_vwmulsu_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
   return vwmulsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m1_t test_vwmulsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
+vint64m1_t test_vwmulsu_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwmulsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vint64m2_t test_vwmulsu_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl) {
   return vwmulsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m2_t test_vwmulsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
+vint64m2_t test_vwmulsu_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m4_t test_vwmulsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vint64m4_t test_vwmulsu_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl) {
   return vwmulsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m4_t test_vwmulsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
+vint64m4_t test_vwmulsu_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vwmulsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vint64m8_t test_vwmulsu_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl) {
   return vwmulsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vwmulsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
+vint64m8_t test_vwmulsu_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl) {
   return vwmulsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwmul_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwmul_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwmul_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwmul_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwmul_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwmul_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vwmul_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmul_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmul_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmul_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmul_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmul_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmul_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmul_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmul_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32mf2_mt( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmul_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmul_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmul_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmul_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmul_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmul_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmul_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmul_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmul_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmul_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmul_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwmul_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmul_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv4i64.nxv4i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwmul_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwmul_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vwmul_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmul_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmul.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwmul_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwmul_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwmulu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwmulu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwmulu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwmulu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.nxv4i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwmulu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwmulu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwmulu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwmulu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwmulu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwmulu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwmulu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwmulu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwmulu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwmulu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwmulu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwmulu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwmulu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwmulu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.nxv8i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwmulu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwmulu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwmulu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwmulu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwmulu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwmulu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv2i64.nxv2i32.i32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwmulu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwmulu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwmulu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwmulu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vwmulu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwmulu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwmulu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmulsu_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vwmulsu_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmulsu_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vwmulsu_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmulsu_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vwmulsu_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmulsu_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vwmulsu_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmulsu_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwmulsu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vwmulsu_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmulsu_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vwmulsu_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmulsu_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vwmulsu_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmulsu_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vwmulsu_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmulsu_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vwmulsu_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmulsu_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vwmulsu_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmulsu_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vwmulsu_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwmulsu_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwmulsu_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m1_mt( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwmulsu_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vwmulsu_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vwmulsu_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwmulsu_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwmulsu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vwmulsu_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) {
+  return vwmulsu_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
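The `_mt` tests above all request the tail-agnostic policy through the new trailing argument, which is why every masked `@llvm.riscv.vwmulsu.mask.*` call in the CHECK-RV64 lines ends with an extra `i64 1` operand. As a minimal usage sketch, not part of the generated tests: a caller selects the policy via the last argument of the masked `_mt` builtin. Only `VE_TAIL_AGNOSTIC` appears in these hunks; a tail-undisturbed value presumably exists as well, but its spelling is not shown here.

    #include <riscv_vector.h>

    // Usage sketch, not a generated test: masked widening signed-by-unsigned
    // multiply with an explicit tail policy. The signature mirrors
    // test_vwmulsu_vx_i64m1_mt above, minus the unused ta parameter.
    vint64m1_t widen_mulsu_tail_agnostic(vbool64_t mask, vint64m1_t maskedoff,
                                         vint32mf2_t op1, uint32_t op2,
                                         size_t vl) {
      // The policy argument lowers to the trailing `i64 1` operand of
      // @llvm.riscv.vwmulsu.mask.nxv1i64.nxv1i32.i32.i64 seen in the
      // CHECK-RV64 lines above.
      return vwmulsu_vx_i64m1_mt(mask, maskedoff, op1, op2, vl,
                                 VE_TAIL_AGNOSTIC);
    }

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c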
index aab1f15..045ec44 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwsub.c
@@ -1088,1234 +1088,2167 @@ vuint64m8_t test_vwsubu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.nxv8i64.i32.i64( [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vuint64m8_t test_vwsubu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwsubu_wx_u64m8 (vuint64m8_t op1, uint32_t op2, size_t vl) {
   return vwsubu_wx_u64m8(op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+vint16mf4_t test_vwsub_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint8mf8_t op1, int8_t op2, size_t vl) {
+vint16mf4_t test_vwsub_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
   return vwsub_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwsub_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
+vint16mf4_t test_vwsub_wv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
   return vwsub_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint16mf4_t test_vwsub_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
-                                   vint16mf4_t op1, int8_t op2, size_t vl) {
+vint16mf4_t test_vwsub_wx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t
vl) { return vwsub_wx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vwsub_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint8mf4_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vwsub_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, vint8mf4_t op2, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) { return vwsub_wv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vwsub_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, - vint16mf2_t op1, int8_t op2, size_t vl) { +vint16mf2_t test_vwsub_wx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) { return vwsub_wx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - 
vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwsub_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vwsub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint8mf2_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vwsub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, vint8mf2_t op2, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) { return vwsub_wv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwsub_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, - vint16m1_t op1, int8_t op2, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) { return vwsub_wx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vwsub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint8m1_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vwsub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, vint8m1_t op2, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) { return vwsub_wv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vwsub_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, - vint16m2_t op1, int8_t op2, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) { return vwsub_wx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vwsub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint8m2_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vwsub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, vint8m2_t op2, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) { return vwsub_wv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vwsub_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, - vint16m4_t op1, int8_t op2, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) { return vwsub_wx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vwsub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint8m4_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vwsub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, vint8m4_t op2, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) { return vwsub_wv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vwsub_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, - vint16m8_t op1, int8_t op2, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) { return vwsub_wx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vwsub_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint16mf4_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vwsub_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, vint16mf4_t op2, - size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) { return vwsub_wv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vwsub_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, - vint32mf2_t op1, int16_t op2, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) { return vwsub_wx_i32mf2_m(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vwsub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint16mf2_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vwsub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, vint16mf2_t op2, size_t vl) { +vint32m1_t test_vwsub_wv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) { return vwsub_wv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwsub_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, - vint32m1_t op1, int16_t op2, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) { return vwsub_wx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t 
test_vwsub_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vwsub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint16m1_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vwsub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, vint16m1_t op2, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) { return vwsub_wv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vwsub_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, - vint32m2_t op1, int16_t op2, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) { return vwsub_wx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vwsub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vint32m4_t test_vwsub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint16m2_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vwsub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, vint16m2_t op2, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) { return vwsub_wv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vwsub_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, - vint32m4_t op1, int16_t op2, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) { return vwsub_wx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vwsub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint16m4_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vwsub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, vint16m4_t op2, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) { return vwsub_wv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vwsub_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, - vint32m8_t op1, int16_t op2, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) { return vwsub_wx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vwsub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint32mf2_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vwsub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, vint32mf2_t op2, size_t vl) { +vint64m1_t test_vwsub_wv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) { return vwsub_wv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwsub_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, - vint64m1_t op1, int32_t op2, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) { return vwsub_wx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vwsub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint32m1_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vwsub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, vint32m1_t op2, size_t vl) { +vint64m2_t test_vwsub_wv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) { return vwsub_wv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vwsub_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, - vint64m2_t op1, int32_t op2, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) { return vwsub_wx_i64m2_m(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vwsub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint32m2_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vwsub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, vint32m2_t op2, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) { return vwsub_wv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vwsub_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, - vint64m4_t op1, int32_t op2, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) { return vwsub_wx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t 
test_vwsub_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vwsub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint32m4_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vwsub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, vint32m4_t op2, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) { return vwsub_wv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vwsub_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, - vint64m8_t op1, int32_t op2, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) { return vwsub_wx_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) { return vwsubu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) { return vwsubu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, vuint8mf8_t op2, - size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) { return vwsubu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vwsubu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint16mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) { return vwsubu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, vuint8mf4_t op2, - size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) { return vwsubu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vwsubu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, - vuint8mf4_t op1, uint8_t op2, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) { return vwsubu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vwsubu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
- vuint16mf2_t op1, vuint8mf4_t op2,
- size_t vl) {
+vuint16mf2_t test_vwsubu_wv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
return vwsubu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vwsubu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
- vuint16mf2_t op1, uint8_t op2, size_t vl) {
+vuint16mf2_t test_vwsubu_wx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
return vwsubu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vwsubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint8mf2_t op1, vuint8mf2_t op2,
- size_t vl) {
+vuint16m1_t test_vwsubu_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
return vwsubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vwsubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint16m1_t test_vwsubu_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
return vwsubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vwsubu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint16m1_t op1, vuint8mf2_t op2,
- size_t vl) {
+vuint16m1_t test_vwsubu_wv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
return vwsubu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vwsubu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
- vuint16m1_t op1, uint8_t op2, size_t vl) {
+vuint16m1_t test_vwsubu_wx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
return vwsubu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwsubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint16m2_t test_vwsubu_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
return vwsubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwsubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint16m2_t test_vwsubu_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
return vwsubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwsubu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
+vuint16m2_t test_vwsubu_wv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
return vwsubu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vwsubu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
- vuint16m2_t op1, uint8_t op2, size_t vl) {
+vuint16m2_t test_vwsubu_wx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
return vwsubu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwsubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint16m4_t test_vwsubu_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
return vwsubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwsubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint16m4_t test_vwsubu_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
return vwsubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwsubu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
+vuint16m4_t test_vwsubu_wv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
return vwsubu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vwsubu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
- vuint16m4_t op1, uint8_t op2, size_t vl) {
+vuint16m4_t test_vwsubu_wx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
return vwsubu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwsubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint16m8_t test_vwsubu_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
return vwsubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwsubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint16m8_t test_vwsubu_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
return vwsubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwsubu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
+vuint16m8_t test_vwsubu_wv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
return vwsubu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vwsubu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
- vuint16m8_t op1, uint8_t op2, size_t vl) {
+vuint16m8_t test_vwsubu_wx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
return vwsubu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwsubu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint16mf4_t op1, vuint16mf4_t op2,
- size_t vl) {
+vuint32mf2_t test_vwsubu_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
return vwsubu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwsubu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint16mf4_t op1, uint16_t op2,
- size_t vl) {
+vuint32mf2_t test_vwsubu_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
return vwsubu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwsubu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint32mf2_t op1, vuint16mf4_t op2,
- size_t vl) {
+vuint32mf2_t test_vwsubu_wv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
return vwsubu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vwsubu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
- vuint32mf2_t op1, uint16_t op2,
- size_t vl) {
+vuint32mf2_t test_vwsubu_wx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
return vwsubu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwsubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint16mf2_t op1, vuint16mf2_t op2,
- size_t vl) {
+vuint32m1_t test_vwsubu_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
return vwsubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwsubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint32m1_t test_vwsubu_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
return vwsubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwsubu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint32m1_t op1, vuint16mf2_t op2,
- size_t vl) {
+vuint32m1_t test_vwsubu_wv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
return vwsubu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vwsubu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
- vuint32m1_t op1, uint16_t op2, size_t vl) {
+vuint32m1_t test_vwsubu_wx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
return vwsubu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwsubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint16m1_t op1, vuint16m1_t op2,
- size_t vl) {
+vuint32m2_t test_vwsubu_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
return vwsubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwsubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint32m2_t test_vwsubu_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
return vwsubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwsubu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint32m2_t op1, vuint16m1_t op2,
- size_t vl) {
+vuint32m2_t test_vwsubu_wv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
return vwsubu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vwsubu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
- vuint32m2_t op1, uint16_t op2, size_t vl) {
+vuint32m2_t test_vwsubu_wx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
return vwsubu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwsubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint16m2_t op1, vuint16m2_t op2,
- size_t vl) {
+vuint32m4_t test_vwsubu_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
return vwsubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwsubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint32m4_t test_vwsubu_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
return vwsubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwsubu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint32m4_t op1, vuint16m2_t op2,
- size_t vl) {
+vuint32m4_t test_vwsubu_wv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
return vwsubu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vwsubu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
- vuint32m4_t op1, uint16_t op2, size_t vl) {
+vuint32m4_t test_vwsubu_wx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
return vwsubu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwsubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint16m4_t op1, vuint16m4_t op2,
- size_t vl) {
+vuint32m8_t test_vwsubu_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
return vwsubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwsubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint32m8_t test_vwsubu_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
return vwsubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwsubu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint32m8_t op1, vuint16m4_t op2,
- size_t vl) {
+vuint32m8_t test_vwsubu_wv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
return vwsubu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vwsubu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
- vuint32m8_t op1, uint16_t op2, size_t vl) {
+vuint32m8_t test_vwsubu_wx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
return vwsubu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwsubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint32mf2_t op1, vuint32mf2_t op2,
- size_t vl) {
+vuint64m1_t test_vwsubu_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vwsubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwsubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint64m1_t test_vwsubu_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vwsubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwsubu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint64m1_t op1, vuint32mf2_t op2,
- size_t vl) {
+vuint64m1_t test_vwsubu_wv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
return vwsubu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vwsubu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
- vuint64m1_t op1, uint32_t op2, size_t vl) {
+vuint64m1_t test_vwsubu_wx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
return vwsubu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwsubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint32m1_t op1, vuint32m1_t op2,
- size_t vl) {
+vuint64m2_t test_vwsubu_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
return vwsubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwsubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint64m2_t test_vwsubu_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
return vwsubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwsubu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint64m2_t op1, vuint32m1_t op2,
- size_t vl) {
+vuint64m2_t test_vwsubu_wv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
return vwsubu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vwsubu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
- vuint64m2_t op1, uint32_t op2, size_t vl) {
+vuint64m2_t test_vwsubu_wx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
return vwsubu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwsubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint32m2_t op1, vuint32m2_t op2,
- size_t vl) {
+vuint64m4_t test_vwsubu_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
return vwsubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwsubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint64m4_t test_vwsubu_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
return vwsubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwsubu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t op1, vuint32m2_t op2,
- size_t vl) {
+vuint64m4_t test_vwsubu_wv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
return vwsubu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vwsubu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
- vuint64m4_t op1, uint32_t op2, size_t vl) {
+vuint64m4_t test_vwsubu_wx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
return vwsubu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwsubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint32m4_t op1, vuint32m4_t op2,
- size_t vl) {
+vuint64m8_t test_vwsubu_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
return vwsubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwsubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwsubu_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
return vwsubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwsubu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t op1, vuint32m4_t op2,
- size_t vl) {
+vuint64m8_t test_vwsubu_wv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
return vwsubu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vwsubu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
- vuint64m8_t op1, uint32_t op2, size_t vl) {
+vuint64m8_t test_vwsubu_wx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
return vwsubu_wx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_wv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vwsub_wx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_wv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vwsub_wx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_wv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vwsub_wx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_wv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vwsub_wx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_wv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vwsub_wx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_wv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vwsub_wx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_wv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vwsub_wx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_wv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vwsub_wx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_wv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vwsub_wx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_wv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vwsub_wx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_wv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vwsub_wx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+ return vwsub_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vwsub_wv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vwsub_wv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+ return vwsub_wv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vwsub_wx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_wx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vwsub_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl, uint8_t ta) { + return vwsub_wv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vwsub_wx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_wx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vwsub_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl, uint8_t ta) { + return vwsub_wv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vwsub_wx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_wx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vwsub_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_vx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wv_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl, uint8_t ta) { + return vwsub_wv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsub_wx_i64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsub.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vwsub_wx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl, uint8_t ta) { + return vwsub_wx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
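
As an aside on usage: every _mt test in this block follows the same recipe. The masked builtin takes the usual (mask, maskedoff, operands, vl) arguments plus one trailing tail-policy value, and VE_TAIL_AGNOSTIC is what lowers to the constant `i64 1` in the CHECK lines above. A minimal sketch, assuming the riscv_vector.h generated by this patch, and assuming VE_TAIL_UNDISTURBED (lowering to `i64 0`) as the counterpart macro, neither of which is spelled out in this hunk:

#include <riscv_vector.h>

// Widening unsigned subtract, masked, with an explicit tail policy.
// Inactive (masked-off) elements come from maskedoff; with
// VE_TAIL_AGNOSTIC the tail elements (indices >= vl) may hold any
// value after the operation.
vuint32m1_t widening_sub_ta(vbool32_t mask, vuint32m1_t maskedoff,
                            vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vwsubu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// Same operation, but the tail elements of maskedoff are preserved.
// VE_TAIL_UNDISTURBED is the assumed counterpart policy; it would
// lower to `i64 0` in the intrinsic call.
vuint32m1_t widening_sub_tu(vbool32_t mask, vuint32m1_t maskedoff,
                            vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vwsubu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}

The policy is an ordinary integer parameter at the C level, but the tests always pass a compile-time constant, so the emitted IR carries an immediate policy operand.
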
+vuint16mf4_t test_vwsubu_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vwsubu_wx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vwsubu_wx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
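
The vxor.c hunks that follow illustrate the complementary, source-compatible half of the change: existing masked _m builtins keep their C signature, and only the emitted call gains the trailing `i64 1` (tail agnostic) operand. That is why every CHECK line in that file is rewritten while the test functions themselves only pick up a whitespace change in the signature. A sketch of a call site that recompiles unchanged, using the vxor_vx_i32m1_m builtin exercised below:

#include <riscv_vector.h>

// Pre-existing masked builtin: no new argument at the C level. With
// this patch, the lowered @llvm.riscv.vxor.mask call simply ends in
// ", i64 1" (tail agnostic), matching the updated CHECK lines.
vint32m1_t xor_masked(vbool32_t mask, vint32m1_t maskedoff,
                      vint32m1_t op1, int32_t op2, size_t vl) {
  return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
}
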
+vuint16m1_t test_vwsubu_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vwsubu_wx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vwsubu_wx_u16m2_mt (vbool8_t 
mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vwsubu_wx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, 
vuint16m8_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv32i16.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vwsubu_wx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vwsubu_wx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_vx_u32m1_mt (vbool32_t mask, vuint32m1_t 
maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vwsubu_wx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vwsubu_wx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t 
op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vwsubu_wx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv16i32.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vwsubu_wx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, 
size_t vl, uint8_t ta) { + return vwsubu_wx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv1i64.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv1i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vwsubu_wx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv2i64.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl, 
uint8_t ta) { + return vwsubu_wv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv2i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vwsubu_wx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv4i64.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv4i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vwsubu_wx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.mask.nxv8i64.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return 
vwsubu_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vwsubu_wv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vwsubu_wx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vwsubu.w.mask.nxv8i64.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vwsubu_wx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vwsubu_wx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c index 83c9997..1642d28 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vxor.c @@ -792,798 +792,1591 @@ vuint64m8_t test_vxor_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.nxv8i64.i64.i64( [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vxor_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { +vuint64m8_t test_vxor_vx_u64m8 (vuint64m8_t op1, uint64_t op2, size_t vl) { return vxor_vx_u64m8(op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vv_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { +vint8mf8_t test_vxor_vv_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) { return vxor_vv_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vxor_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { +vint8mf8_t test_vxor_vx_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) { return vxor_vx_i8mf8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vv_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { +vint8mf4_t test_vxor_vv_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) { return vxor_vv_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vxor_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { +vint8mf4_t test_vxor_vx_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) { return vxor_vx_i8mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vv_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { +vint8mf2_t test_vxor_vv_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) { return vxor_vv_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vxor_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { +vint8mf2_t test_vxor_vx_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) { return vxor_vx_i8mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { +vint8m1_t test_vxor_vv_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { return vxor_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vxor_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { +vint8m1_t test_vxor_vx_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { return vxor_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { +vint8m2_t test_vxor_vv_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { return vxor_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vxor_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { +vint8m2_t test_vxor_vx_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { return vxor_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { +vint8m4_t test_vxor_vv_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { return vxor_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vxor_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { +vint8m4_t test_vxor_vx_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { return vxor_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { +vint8m8_t test_vxor_vv_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { return vxor_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i8m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vxor_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { +vint8m8_t test_vxor_vx_i8m8_m (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { return vxor_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { +vint16mf4_t test_vxor_vv_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) { return vxor_vv_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vxor_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { +vint16mf4_t test_vxor_vx_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) { return vxor_vx_i16mf4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { +vint16mf2_t test_vxor_vv_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) { return vxor_vv_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 
[[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vxor_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { +vint16mf2_t test_vxor_vx_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) { return vxor_vx_i16mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { +vint16m1_t test_vxor_vv_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { return vxor_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vxor_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { +vint16m1_t test_vxor_vx_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { return vxor_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { +vint16m2_t test_vxor_vv_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { return vxor_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vxor_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { +vint16m2_t test_vxor_vx_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { return vxor_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { +vint16m4_t test_vxor_vv_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { return vxor_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vxor_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { +vint16m4_t test_vxor_vx_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { return vxor_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { +vint16m8_t test_vxor_vv_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { return vxor_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vxor_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { +vint16m8_t test_vxor_vx_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { return vxor_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { +vint32mf2_t test_vxor_vv_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) { return vxor_vv_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vxor_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { +vint32mf2_t test_vxor_vx_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) { return vxor_vx_i32mf2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vxor_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { +vint32m1_t test_vxor_vv_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vxor_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { +vint32m1_t test_vxor_vx_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { return vxor_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { +vint32m2_t test_vxor_vv_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { return vxor_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vxor_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { +vint32m2_t test_vxor_vx_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { return vxor_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { +vint32m4_t test_vxor_vv_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { return vxor_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vxor_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { +vint32m4_t test_vxor_vx_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { return vxor_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { +vint32m8_t test_vxor_vv_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { return vxor_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vxor_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { +vint32m8_t test_vxor_vx_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { return vxor_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vv_i64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vxor_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { +vint64m1_t test_vxor_vv_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { return vxor_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: @test_vxor_vx_i64m1_m( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m1_t test_vxor_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
+vint64m1_t test_vxor_vx_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
  return vxor_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vxor_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+vint64m2_t test_vxor_vv_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vxor_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m2_t test_vxor_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
+vint64m2_t test_vxor_vx_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
  return vxor_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vxor_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+vint64m4_t test_vxor_vv_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vxor_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m4_t test_vxor_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
+vint64m4_t test_vxor_vx_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
  return vxor_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vxor_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+vint64m8_t test_vxor_vv_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vxor_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint64m8_t test_vxor_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+vint64m8_t test_vxor_vx_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
  return vxor_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vxor_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+vuint8mf8_t test_vxor_vv_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vxor_vv_u8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf8_t test_vxor_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+vuint8mf8_t test_vxor_vx_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8mf8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vxor_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+vuint8mf4_t test_vxor_vv_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vxor_vv_u8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf4_t test_vxor_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+vuint8mf4_t test_vxor_vx_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vxor_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+vuint8mf2_t test_vxor_vv_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vxor_vv_u8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8mf2_t test_vxor_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+vuint8mf2_t test_vxor_vx_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vxor_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+vuint8m1_t test_vxor_vv_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vxor_vv_u8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m1_t test_vxor_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
+vuint8m1_t test_vxor_vx_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vxor_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+vuint8m2_t test_vxor_vv_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vxor_vv_u8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m2_t test_vxor_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+vuint8m2_t test_vxor_vx_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vxor_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+vuint8m4_t test_vxor_vv_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vxor_vv_u8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m4_t test_vxor_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+vuint8m4_t test_vxor_vx_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vxor_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+vuint8m8_t test_vxor_vv_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
  return vxor_vv_u8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint8m8_t test_vxor_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
+vuint8m8_t test_vxor_vx_u8m8_m (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
  return vxor_vx_u8m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vxor_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+vuint16mf4_t test_vxor_vv_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vxor_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf4_t test_vxor_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+vuint16mf4_t test_vxor_vx_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vxor_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+vuint16mf2_t test_vxor_vv_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vxor_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16mf2_t test_vxor_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+vuint16mf2_t test_vxor_vx_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vxor_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+vuint16m1_t test_vxor_vv_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vxor_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m1_t test_vxor_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
+vuint16m1_t test_vxor_vx_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vxor_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+vuint16m2_t test_vxor_vv_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vxor_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m2_t test_vxor_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
+vuint16m2_t test_vxor_vx_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vxor_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+vuint16m4_t test_vxor_vv_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vxor_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m4_t test_vxor_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
+vuint16m4_t test_vxor_vx_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vxor_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+vuint16m8_t test_vxor_vv_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
  return vxor_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint16m8_t test_vxor_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
+vuint16m8_t test_vxor_vx_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
  return vxor_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vxor_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+vuint32mf2_t test_vxor_vv_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vxor_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32mf2_t test_vxor_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+vuint32mf2_t test_vxor_vx_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vxor_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vxor_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+vuint32m1_t test_vxor_vv_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vxor_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m1_t test_vxor_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
+vuint32m1_t test_vxor_vx_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vxor_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vxor_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+vuint32m2_t test_vxor_vv_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vxor_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m2_t test_vxor_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
+vuint32m2_t test_vxor_vx_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vxor_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vxor_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+vuint32m4_t test_vxor_vv_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vxor_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m4_t test_vxor_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
+vuint32m4_t test_vxor_vx_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vxor_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vxor_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+vuint32m8_t test_vxor_vv_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
  return vxor_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint32m8_t test_vxor_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
+vuint32m8_t test_vxor_vx_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vxor_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vxor_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+vuint64m1_t test_vxor_vv_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vxor_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vxor_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
+vuint64m1_t test_vxor_vx_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vxor_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vxor_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+vuint64m2_t test_vxor_vv_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vxor_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m2_t test_vxor_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
+vuint64m2_t test_vxor_vx_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vxor_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vxor_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+vuint64m4_t test_vxor_vv_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vxor_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m4_t test_vxor_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
+vuint64m4_t test_vxor_vx_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vxor_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vxor_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+vuint64m8_t test_vxor_vv_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vxor_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m8_t test_vxor_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
+vuint64m8_t test_vxor_vx_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vxor_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vxor_vv_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vxor_vx_i8mf8_mt (vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vxor_vv_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vxor_vx_i8mf4_mt (vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vxor_vv_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vxor_vx_i8mf2_mt (vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vxor_vv_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vxor_vx_i8m1_mt (vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vxor_vv_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vxor_vx_i8m2_mt (vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vxor_vv_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vxor_vx_i8m4_mt (vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vxor_vv_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m8_t test_vxor_vx_i8m8_mt (vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vxor_vv_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vxor_vx_i16mf4_mt (vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vxor_vv_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vxor_vx_i16mf2_mt (vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vxor_vv_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vxor_vx_i16m1_mt (vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vxor_vv_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vxor_vx_i16m2_mt (vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vxor_vv_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vxor_vx_i16m4_mt (vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vxor_vv_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i16m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vxor_vx_i16m8_mt (vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vv_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vxor_vx_i32mf2_mt (vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vxor_vv_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vxor_vx_i32m1_mt (vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vxor_vv_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vxor_vx_i32m2_mt (vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vxor_vv_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vxor_vx_i32m4_mt (vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vxor_vv_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i32m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vxor_vx_i32m8_mt (vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vxor_vv_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vxor_vx_i64m1_mt (vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vxor_vv_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vxor_vx_i64m2_mt (vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vxor_vv_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vxor_vx_i64m4_mt (vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vxor_vv_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_i64m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vxor_vx_i64m8_mt (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_i64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vxor_vv_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vxor_vx_u8mf8_mt (vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8mf8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vxor_vv_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vxor_vx_u8mf4_mt (vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vxor_vv_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vxor_vx_u8mf2_mt (vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vxor_vv_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vxor_vx_u8m1_mt (vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vxor_vv_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vxor_vx_u8m2_mt (vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vxor_vv_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vxor_vx_u8m4_mt (vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vxor_vv_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u8m8_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv64i8.i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m8_t test_vxor_vx_u8m8_mt (vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u8m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vxor_vv_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf4_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vxor_vx_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u16mf4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vxor_vv_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, uint8_t ta) {
+  return vxor_vv_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vx_u16mf2_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vxor_vx_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, uint8_t ta) {
+  return vxor_vx_u16mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
+}
+
+// CHECK-RV64-LABEL: @test_vxor_vv_u16m1_mt(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vxor_vv_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, uint8_t ta) {
+  return
vxor_vv_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vxor_vx_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u16m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vv_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vxor_vx_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u16m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vv_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vxor_vx_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u16m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vv_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv32i16.i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vxor_vx_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u16m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: 
@test_vxor_vv_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vv_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vxor_vx_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u32mf2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vv_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vxor_vx_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vv_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vxor_vx_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u32m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vv_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vxor.mask.nxv8i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vxor_vx_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u32m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vv_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv16i32.i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vxor_vx_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u32m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vv_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv1i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vxor_vx_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u64m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vv_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv2i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vxor_vx_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u64m2_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vv_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv4i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vxor_vx_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u64m4_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vv_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vv_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl, uint8_t ta) { + return vxor_vv_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vxor_vx_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vxor.mask.nxv8i64.i64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vxor_vx_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl, uint8_t ta) { + return vxor_vx_u64m8_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c index 234ef15..4842719 100644 --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c @@ -252,286 +252,511 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) { // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) { +vuint64m8_t test_vzext_vf2_u64m8 (vuint32m4_t op1, size_t vl) { return vzext_vf2_u64m8(op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, - vuint8mf8_t op1, size_t vl) { +vuint16mf4_t test_vzext_vf2_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl) { return vzext_vf2_u16mf4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t 
maskedoff, - vuint8mf4_t op1, size_t vl) { +vuint16mf2_t test_vzext_vf2_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl) { return vzext_vf2_u16mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, - vuint8mf2_t op1, size_t vl) { +vuint16m1_t test_vzext_vf2_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl) { return vzext_vf2_u16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, - vuint8m1_t op1, size_t vl) { +vuint16m2_t test_vzext_vf2_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl) { return vzext_vf2_u16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, - vuint8m2_t op1, size_t vl) { +vuint16m4_t test_vzext_vf2_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl) { return vzext_vf2_u16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, - vuint8m4_t op1, size_t vl) { +vuint16m8_t test_vzext_vf2_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl) { return vzext_vf2_u16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint8mf8_t op1, size_t vl) { +vuint32mf2_t test_vzext_vf4_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { return 
vzext_vf4_u32mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint8mf4_t op1, size_t vl) { +vuint32m1_t test_vzext_vf4_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { return vzext_vf4_u32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint8mf2_t op1, size_t vl) { +vuint32m2_t test_vzext_vf4_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { return vzext_vf4_u32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint8m1_t op1, size_t vl) { +vuint32m4_t test_vzext_vf4_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { return vzext_vf4_u32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint8m2_t op1, size_t vl) { +vuint32m8_t test_vzext_vf4_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { return vzext_vf4_u32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint8mf8_t op1, size_t vl) { +vuint64m1_t test_vzext_vf8_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { return vzext_vf8_u64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint8mf4_t op1, size_t vl) { +vuint64m2_t test_vzext_vf8_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { return vzext_vf8_u64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint8mf2_t op1, size_t vl) { +vuint64m4_t test_vzext_vf8_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { return vzext_vf8_u64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint8m1_t op1, size_t vl) { +vuint64m8_t test_vzext_vf8_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { return vzext_vf8_u64m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, - vuint16mf4_t op1, size_t vl) { +vuint32mf2_t test_vzext_vf2_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl) { return vzext_vf2_u32mf2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, - vuint16mf2_t op1, size_t vl) { +vuint32m1_t test_vzext_vf2_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl) { return vzext_vf2_u32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, - vuint16m1_t op1, size_t vl) { +vuint32m2_t test_vzext_vf2_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl) { return vzext_vf2_u32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, - vuint16m2_t op1, size_t vl) { +vuint32m4_t test_vzext_vf2_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl) { return vzext_vf2_u32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, - vuint16m4_t op1, size_t vl) { +vuint32m8_t test_vzext_vf2_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl) { return vzext_vf2_u32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint16mf4_t op1, size_t vl) { +vuint64m1_t test_vzext_vf4_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { return vzext_vf4_u64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint16mf2_t op1, size_t vl) { +vuint64m2_t test_vzext_vf4_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { return vzext_vf4_u64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t 
test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint16m1_t op1, size_t vl) { +vuint64m4_t test_vzext_vf4_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { return vzext_vf4_u64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint16m2_t op1, size_t vl) { +vuint64m8_t test_vzext_vf4_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { return vzext_vf4_u64m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, - vuint32mf2_t op1, size_t vl) { +vuint64m1_t test_vzext_vf2_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl) { return vzext_vf2_u64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, - vuint32m1_t op1, size_t vl) { +vuint64m2_t test_vzext_vf2_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl) { return vzext_vf2_u64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, - vuint32m2_t op1, size_t vl) { +vuint64m4_t test_vzext_vf2_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl) { return vzext_vf2_u64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, - vuint32m4_t op1, size_t vl) { +vuint64m8_t test_vzext_vf2_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, 
vuint32m4_t op1, size_t vl) { return vzext_vf2_u64m8_m(mask, maskedoff, op1, vl); } + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vzext_vf2_u16mf4_mt (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16mf4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vzext_vf2_u16mf2_mt (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vzext_vf2_u16m1_mt (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vzext_vf2_u16m2_mt (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vzext_vf2_u16m4_mt (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vzext_vf2_u16m8_mt (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u16m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl, uint8_t ta) { + return vzext_vf8_u64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl, uint8_t ta) { + return vzext_vf8_u64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf8_u64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf8_u64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mt( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf2_u32mf2_mt (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u32mf2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf2_u32m1_mt (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u32m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf2_u32m2_mt (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u32m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf2_u32m4_mt (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u32m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf2_u32m8_mt (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u32m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_mt (vbool16_t mask, vuint64m4_t 
maskedoff, vuint16m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl, uint8_t ta) { + return vzext_vf4_u64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf2_u64m1_mt (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u64m1_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf2_u64m2_mt (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u64m2_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf2_u64m4_mt (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u64m4_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + +// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_mt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf2_u64m8_mt (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, size_t vl, uint8_t ta) { + return vzext_vf2_u64m8_mt(mask, maskedoff, op1, vl, VE_TAIL_AGNOSTIC); +} + diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp index 24f2250..1525006 100644 --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -157,6 +157,7 @@ private: bool IsMask; bool HasMaskedOffOperand; bool HasVL; + bool HasPolicy; bool HasNoMaskedOverloaded; bool HasAutoDef; // There is automiatic definition in header std::string ManualCodegen; @@ -172,7 +173,7 @@ public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, StringRef MangledSuffix, StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, - bool HasNoMaskedOverloaded, bool HasAutoDef, + bool HasPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, StringRef RequiredExtension, unsigned NF); @@ -183,6 +184,7 @@ public: bool hasSideEffects() const { return HasSideEffects; } bool hasMaskedOffOperand() const { return HasMaskedOffOperand; } bool hasVL() const { return HasVL; } + bool hasPolicy() const { return 
HasPolicy; }
   bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
   bool hasManualCodegen() const { return !ManualCodegen.empty(); }
   bool hasAutoDef() const { return HasAutoDef; }
@@ -199,6 +201,9 @@ public:
   // init the RVVIntrinsic ID and IntrinsicTypes.
   void emitCodeGenSwitchBody(raw_ostream &o) const;
 
+  // Emit the define macros for mask intrinsics using _mt intrinsics.
+  void emitIntrinsicMaskMacro(raw_ostream &o) const;
+
   // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
   void emitIntrinsicMacro(raw_ostream &OS) const;
 
@@ -231,6 +236,8 @@ public:
 private:
   /// Create all intrinsics and add them to \p Out
   void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+  /// Create headers and write them to \p OS
+  void createRVVHeaders(raw_ostream &OS);
   /// Compute output and input types by applying different config (basic type
   /// and LMUL with type transformers). It also record result of type in legal
   /// or illegal set to avoid compute the same config again. The result maybe
@@ -756,15 +763,15 @@ void RVVType::applyModifier(StringRef Transformer) {
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
                            StringRef NewMangledName, StringRef MangledSuffix,
                            StringRef IRName, bool HasSideEffects, bool IsMask,
-                           bool HasMaskedOffOperand, bool HasVL,
+                           bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            StringRef RequiredExtension, unsigned NF)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
-      HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
-      ManualCodegen(ManualCodegen.str()), NF(NF) {
+      HasPolicy(HasPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
+      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) {
   // Init Name and MangledName
   Name = NewName.str();
 
@@ -778,6 +785,8 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
     MangledName += "_" + MangledSuffix.str();
   if (IsMask) {
     Name += "_m";
+    if (HasPolicy)
+      Name += "t";
   }
   // Init RISC-V extensions
   for (const auto &T : OutInTypes) {
@@ -830,7 +839,10 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
 
   if (isMask()) {
     if (hasVL()) {
-      OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+      if (hasPolicy())
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 2);\n";
+      else
+        OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
     } else {
       OS << "  std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
     }
@@ -870,6 +882,24 @@ void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const {
   OS << ")\n";
 }
 
+void RVVIntrinsic::emitIntrinsicMaskMacro(raw_ostream &OS) const {
+  OS << "#define " << getName().drop_back() << "(";
+  if (!InputTypes.empty()) {
+    ListSeparator LS;
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "op" << i;
+  }
+  OS << ") \\\n";
+  OS << "__builtin_rvv_" << getName() << "(";
+  ListSeparator LS;
+  if (!InputTypes.empty()) {
+    for (unsigned i = 0, e = InputTypes.size() - 1; i != e; ++i)
+      OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+  }
+  OS << LS << "(size_t)VE_TAIL_AGNOSTIC";
+  OS << ")\n";
+}
+
 void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
   OS << "__attribute__((clang_builtin_alias(";
   OS << "__builtin_rvv_" << getName() << ")))\n";
@@ -915,6 +945,8 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
   OS << "extern \"C\" {\n";
   OS << "#endif\n\n";
 
+  createRVVHeaders(OS);
+
   std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
   createRVVIntrinsics(Defs);
 
@@ -982,6 +1014,12 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
     Inst.emitIntrinsicMacro(OS);
   });
 
+  // Use _mt to implement _m intrinsics.
+  emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+    if (Inst.isMask() && Inst.hasPolicy())
+      Inst.emitIntrinsicMaskMacro(OS);
+  });
+
   OS << "#define __riscv_v_intrinsic_overloading 1\n";
 
   // Print Overloaded APIs
@@ -1084,6 +1122,7 @@ void RVVEmitter::createRVVIntrinsics(
     bool HasMask = R->getValueAsBit("HasMask");
     bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
     bool HasVL = R->getValueAsBit("HasVL");
+    bool HasPolicy = R->getValueAsBit("HasPolicy");
     bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
     bool HasSideEffects = R->getValueAsBit("HasSideEffects");
     std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
@@ -1138,12 +1177,16 @@ void RVVEmitter::createRVVIntrinsics(
         ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
       }
     }
-    // If HasVL, append 'z' to last operand
+    // If HasVL, append 'z' to the operand list.
     if (HasVL) {
      ProtoSeq.push_back("z");
      ProtoMaskSeq.push_back("z");
    }
 
+    if (HasPolicy) {
+      ProtoMaskSeq.push_back("z");
+    }
+
     // Create Intrinsics for each type and LMUL.
     for (char I : TypeRange) {
       for (int Log2LMUL : Log2LMULList) {
@@ -1158,7 +1201,7 @@ void RVVEmitter::createRVVIntrinsics(
         Out.push_back(std::make_unique<RVVIntrinsic>(
             Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
             HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
-            HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+            HasVL, HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
             Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
         if (HasMask) {
           // Create a mask intrinsic
@@ -1167,7 +1210,7 @@ void RVVEmitter::createRVVIntrinsics(
           Out.push_back(std::make_unique<RVVIntrinsic>(
               Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
               HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
-              HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
+              HasPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
               MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
         }
       } // end for Log2LMULList
@@ -1175,6 +1218,15 @@ void RVVEmitter::createRVVIntrinsics(
     }
   }
 }
 
+void RVVEmitter::createRVVHeaders(raw_ostream &OS) {
+  std::vector<Record *> RVVHeaders =
+      Records.getAllDerivedDefinitions("RVVHeader");
+  for (auto *R : RVVHeaders) {
+    StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+    OS << HeaderCodeStr.str();
+  }
+}
+
 Optional<RVVTypes> RVVEmitter::computeTypes(BasicType BT, int Log2LMUL,
                                             unsigned NF,
                                             ArrayRef<std::string> PrototypeSeq) {
-- 
2.7.4
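
For illustration, a minimal usage sketch of the resulting C API, assuming a compiler with this patch applied. VE_TAIL_AGNOSTIC comes from the generated header; a VE_TAIL_UNDISTURBED counterpart (assumed here, value 0, matching the trailing i64 policy operand in the CHECK lines above) is not shown in this patch, and the function names below are illustrative only:

#include <riscv_vector.h>

// Masked XOR with an explicit tail policy via the new _mt form. With
// VE_TAIL_UNDISTURBED (an assumption here; only VE_TAIL_AGNOSTIC appears
// in the patch itself), elements past vl keep the values from maskedoff.
vint32m1_t xor_keep_tail(vbool32_t mask, vint32m1_t maskedoff,
                         vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vxor_vv_i32m1_mt(mask, maskedoff, op1, op2, vl, VE_TAIL_UNDISTURBED);
}

// The existing _m spelling keeps working: emitIntrinsicMaskMacro above
// turns it into a macro forwarding to the _mt builtin, roughly:
//   #define vxor_vv_i32m1_m(op0, op1, op2, op3, op4) \
//     __builtin_rvv_vxor_vv_i32m1_mt((vbool32_t)(op0), (vint32m1_t)(op1), \
//         (vint32m1_t)(op2), (vint32m1_t)(op3), (size_t)(op4), \
//         (size_t)VE_TAIL_AGNOSTIC)
vint32m1_t xor_default_tail(vbool32_t mask, vint32m1_t maskedoff,
                            vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vxor_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
}

The _mt tests above pass VE_TAIL_AGNOSTIC literally, which is why every CHECK line shows a trailing i64 1 policy operand in the masked intrinsic call.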